Diffstat (limited to 'contrib/llvm/tools/clang/lib')
317 files changed, 38563 insertions, 16488 deletions
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/ARCMT.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/ARCMT.cpp
index dddc886..f266eaf 100644
--- a/contrib/llvm/tools/clang/lib/ARCMigrate/ARCMT.cpp
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/ARCMT.cpp
@@ -124,7 +124,7 @@ public:
     }
   }
 
-  virtual ~CaptureDiagnosticConsumer() {
+  ~CaptureDiagnosticConsumer() override {
     assert(!HasBegunSourceFile && "FinishCapture not called!");
   }
 
@@ -432,7 +432,7 @@ public:
   ARCMTMacroTrackerPPCallbacks(std::vector<SourceLocation> &ARCMTMacroLocs)
     : ARCMTMacroLocs(ARCMTMacroLocs) { }
 
-  void MacroExpands(const Token &MacroNameTok, const MacroDirective *MD,
+  void MacroExpands(const Token &MacroNameTok, const MacroDefinition &MD,
                     SourceRange Range, const MacroArgs *Args) override {
     if (MacroNameTok.getIdentifierInfo()->getName() == getARCMTMacroName())
       ARCMTMacroLocs.push_back(MacroNameTok.getLocation());
@@ -465,7 +465,7 @@ public:
     if (Listener)
       Listener->start(ctx);
   }
-  ~RewritesApplicator() {
+  ~RewritesApplicator() override {
     if (Listener)
       Listener->finish();
   }
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/ObjCMT.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/ObjCMT.cpp
index 52c424c..a43879c 100644
--- a/contrib/llvm/tools/clang/lib/ARCMigrate/ObjCMT.cpp
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/ObjCMT.cpp
@@ -48,7 +48,7 @@ class ObjCMigrateASTConsumer : public ASTConsumer {
   };
 
   void migrateDecl(Decl *D);
-  void migrateObjCInterfaceDecl(ASTContext &Ctx, ObjCContainerDecl *D);
+  void migrateObjCContainerDecl(ASTContext &Ctx, ObjCContainerDecl *D);
   void migrateProtocolConformance(ASTContext &Ctx,
                                   const ObjCImplementationDecl *ImpDecl);
   void CacheObjCNSIntegerTypedefed(const TypedefDecl *TypedefDcl);
@@ -245,17 +245,16 @@ namespace {
         (Msg->getReceiverKind() != ObjCMessageExpr::Instance &&
          Msg->getReceiverKind() != ObjCMessageExpr::SuperInstance))
       return false;
+    if (const Expr *Receiver = Msg->getInstanceReceiver())
+      if (Receiver->getType()->isObjCBuiltinType())
+        return false;
+
     const ObjCMethodDecl *Method = Msg->getMethodDecl();
     if (!Method)
       return false;
     if (!Method->isPropertyAccessor())
       return false;
-    const ObjCInterfaceDecl *IFace =
-      NS.getASTContext().getObjContainingInterface(Method);
-    if (!IFace)
-      return false;
-
     const ObjCPropertyDecl *Prop = Method->findPropertyDecl();
     if (!Prop)
       return false;
@@ -305,6 +304,10 @@ namespace {
     BegLoc = PP.getLocForEndOfToken(BegLoc);
     SourceLocation EndLoc = RHS->getLocStart();
     EndLoc = EndLoc.getLocWithOffset(-1);
+    const char *colon = PP.getSourceManager().getCharacterData(EndLoc);
+    // Add a space after '=' if there is no space between RHS and '='
+    if (colon && colon[0] == ':')
+      PropertyDotString += " ";
     SourceRange Range(BegLoc, EndLoc);
     commit.replace(Range, PropertyDotString);
     // remove '[' ']'
@@ -465,7 +468,7 @@ static void rewriteToObjCProperty(const ObjCMethodDecl *Getter,
   ASTContext &Context = NS.getASTContext();
   bool LParenAdded = false;
   std::string PropertyString = "@property ";
-  if (UseNsIosOnlyMacro && Context.Idents.get("NS_NONATOMIC_IOSONLY").hasMacroDefinition()) {
+  if (UseNsIosOnlyMacro && NS.isMacroDefined("NS_NONATOMIC_IOSONLY")) {
    PropertyString += "(NS_NONATOMIC_IOSONLY";
    LParenAdded = true;
  } else if (!Atomic) {
@@ -575,7 +578,7 @@ static bool IsCategoryNameWithDeprecatedSuffix(ObjCContainerDecl *D) {
   return false;
 }
 
-void ObjCMigrateASTConsumer::migrateObjCInterfaceDecl(ASTContext &Ctx,
+void ObjCMigrateASTConsumer::migrateObjCContainerDecl(ASTContext &Ctx,
                                                       ObjCContainerDecl *D) {
   if (D->isDeprecated() || IsCategoryNameWithDeprecatedSuffix(D))
     return;
@@ -616,7 +619,7 @@ ClassImplementsAllMethodsAndProperties(ASTContext &Ctx,
     if (Property->getPropertyImplementation() == ObjCPropertyDecl::Optional)
       continue;
     HasAtleastOneRequiredProperty = true;
-    DeclContext::lookup_const_result R = IDecl->lookup(Property->getDeclName());
+    DeclContext::lookup_result R = IDecl->lookup(Property->getDeclName());
     if (R.size() == 0) {
       // Relax the rule and look into class's implementation for a synthesize
       // or dynamic declaration. Class is implementing a property coming from
@@ -647,7 +650,7 @@ ClassImplementsAllMethodsAndProperties(ASTContext &Ctx,
       continue;
     if (MD->getImplementationControl() == ObjCMethodDecl::Optional)
       continue;
-    DeclContext::lookup_const_result R = ImpDecl->lookup(MD->getDeclName());
+    DeclContext::lookup_result R = ImpDecl->lookup(MD->getDeclName());
     if (R.size() == 0)
       return false;
     bool match = false;
@@ -768,19 +771,34 @@ static void rewriteToNSMacroDecl(ASTContext &Ctx,
                                  const TypedefDecl *TypedefDcl,
                                  const NSAPI &NS, edit::Commit &commit,
                                  bool IsNSIntegerType) {
-  QualType EnumUnderlyingT = EnumDcl->getPromotionType();
-  assert(!EnumUnderlyingT.isNull()
+  QualType DesignatedEnumType = EnumDcl->getIntegerType();
+  assert(!DesignatedEnumType.isNull()
          && "rewriteToNSMacroDecl - underlying enum type is null");
   PrintingPolicy Policy(Ctx.getPrintingPolicy());
-  std::string TypeString = EnumUnderlyingT.getAsString(Policy);
+  std::string TypeString = DesignatedEnumType.getAsString(Policy);
   std::string ClassString = IsNSIntegerType ? "NS_ENUM(" : "NS_OPTIONS(";
   ClassString += TypeString;
   ClassString += ", ";
   ClassString += TypedefDcl->getIdentifier()->getName();
   ClassString += ')';
-  SourceRange R(EnumDcl->getLocStart(), EnumDcl->getLocStart());
+  SourceLocation EndLoc;
+  if (EnumDcl->getIntegerTypeSourceInfo()) {
+    TypeSourceInfo *TSourceInfo = EnumDcl->getIntegerTypeSourceInfo();
+    TypeLoc TLoc = TSourceInfo->getTypeLoc();
+    EndLoc = TLoc.getLocEnd();
+    const char *lbrace = Ctx.getSourceManager().getCharacterData(EndLoc);
+    unsigned count = 0;
+    if (lbrace)
+      while (lbrace[count] != '{')
+        ++count;
+    if (count > 0)
+      EndLoc = EndLoc.getLocWithOffset(count-1);
+  }
+  else
+    EndLoc = EnumDcl->getLocStart();
+  SourceRange R(EnumDcl->getLocStart(), EndLoc);
   commit.replace(R, ClassString);
   // This is to remove spaces between '}' and typedef name.
 SourceLocation StartTypedefLoc = EnumDcl->getLocEnd();
@@ -902,7 +920,7 @@ bool ObjCMigrateASTConsumer::migrateNSEnumDecl(ASTContext &Ctx,
                                            const EnumDecl *EnumDcl,
                                            const TypedefDecl *TypedefDcl) {
   if (!EnumDcl->isCompleteDefinition() || EnumDcl->getIdentifier() ||
-      EnumDcl->isDeprecated() || EnumDcl->getIntegerTypeSourceInfo())
+      EnumDcl->isDeprecated())
     return false;
   if (!TypedefDcl) {
     if (NSIntegerTypedefed) {
@@ -1259,7 +1277,7 @@ void ObjCMigrateASTConsumer::migrateNsReturnsInnerPointer(ASTContext &Ctx,
   QualType RT = OM->getReturnType();
 
   if (!TypeIsInnerPointer(RT) ||
-      !Ctx.Idents.get("NS_RETURNS_INNER_POINTER").hasMacroDefinition())
+      !NSAPIObj->isMacroDefined("NS_RETURNS_INNER_POINTER"))
     return;
 
   edit::Commit commit(*Editor);
@@ -1270,9 +1288,9 @@
 void ObjCMigrateASTConsumer::migratePropertyNsReturnsInnerPointer(ASTContext &Ctx,
                                             ObjCPropertyDecl *P) {
   QualType T = P->getType();
-  
+
   if (!TypeIsInnerPointer(T) ||
-      !Ctx.Idents.get("NS_RETURNS_INNER_POINTER").hasMacroDefinition())
+      !NSAPIObj->isMacroDefined("NS_RETURNS_INNER_POINTER"))
     return;
   edit::Commit commit(*Editor);
   commit.insertBefore(P->getLocEnd(), " NS_RETURNS_INNER_POINTER ");
@@ -1390,7 +1408,7 @@ static bool AuditedType (QualType AT) {
 void ObjCMigrateASTConsumer::AnnotateImplicitBridging(ASTContext &Ctx) {
   if (CFFunctionIBCandidates.empty())
     return;
-  if (!Ctx.Idents.get("CF_IMPLICIT_BRIDGING_ENABLED").hasMacroDefinition()) {
+  if (!NSAPIObj->isMacroDefined("CF_IMPLICIT_BRIDGING_ENABLED")) {
     CFFunctionIBCandidates.clear();
     FileId = FileID();
     return;
@@ -1465,16 +1483,14 @@ void ObjCMigrateASTConsumer::AddCFAnnotations(ASTContext &Ctx,
   RetEffect Ret = CE.getReturnValue();
   const char *AnnotationString = nullptr;
   if (Ret.getObjKind() == RetEffect::CF) {
-    if (Ret.isOwned() &&
-        Ctx.Idents.get("CF_RETURNS_RETAINED").hasMacroDefinition())
+    if (Ret.isOwned() && NSAPIObj->isMacroDefined("CF_RETURNS_RETAINED"))
       AnnotationString = " CF_RETURNS_RETAINED";
     else if (Ret.notOwned() &&
-             Ctx.Idents.get("CF_RETURNS_NOT_RETAINED").hasMacroDefinition())
+             NSAPIObj->isMacroDefined("CF_RETURNS_NOT_RETAINED"))
       AnnotationString = " CF_RETURNS_NOT_RETAINED";
   }
   else if (Ret.getObjKind() == RetEffect::ObjC) {
-    if (Ret.isOwned() &&
-        Ctx.Idents.get("NS_RETURNS_RETAINED").hasMacroDefinition())
+    if (Ret.isOwned() && NSAPIObj->isMacroDefined("NS_RETURNS_RETAINED"))
       AnnotationString = " NS_RETURNS_RETAINED";
   }
 
@@ -1491,13 +1507,13 @@ void ObjCMigrateASTConsumer::AddCFAnnotations(ASTContext &Ctx,
     const ParmVarDecl *pd = *pi;
     ArgEffect AE = AEArgs[i];
     if (AE == DecRef && !pd->hasAttr<CFConsumedAttr>() &&
-        Ctx.Idents.get("CF_CONSUMED").hasMacroDefinition()) {
+        NSAPIObj->isMacroDefined("CF_CONSUMED")) {
       edit::Commit commit(*Editor);
       commit.insertBefore(pd->getLocation(), "CF_CONSUMED ");
       Editor->commit(commit);
     }
     else if (AE == DecRefMsg && !pd->hasAttr<NSConsumedAttr>() &&
-             Ctx.Idents.get("NS_CONSUMED").hasMacroDefinition()) {
+             NSAPIObj->isMacroDefined("NS_CONSUMED")) {
       edit::Commit commit(*Editor);
       commit.insertBefore(pd->getLocation(), "NS_CONSUMED ");
       Editor->commit(commit);
@@ -1582,11 +1598,10 @@ void ObjCMigrateASTConsumer::AddCFAnnotations(ASTContext &Ctx,
   RetEffect Ret = CE.getReturnValue();
   const char *AnnotationString = nullptr;
   if (Ret.getObjKind() == RetEffect::CF) {
-    if (Ret.isOwned() &&
-        Ctx.Idents.get("CF_RETURNS_RETAINED").hasMacroDefinition())
+    if (Ret.isOwned() && NSAPIObj->isMacroDefined("CF_RETURNS_RETAINED"))
       AnnotationString = " CF_RETURNS_RETAINED";
     else if (Ret.notOwned() &&
-             Ctx.Idents.get("CF_RETURNS_NOT_RETAINED").hasMacroDefinition())
+             NSAPIObj->isMacroDefined("CF_RETURNS_NOT_RETAINED"))
       AnnotationString = " CF_RETURNS_NOT_RETAINED";
   }
   else if (Ret.getObjKind() == RetEffect::ObjC) {
@@ -1600,8 +1615,7 @@ void ObjCMigrateASTConsumer::AddCFAnnotations(ASTContext &Ctx,
       break;
     default:
-      if (Ret.isOwned() &&
-          Ctx.Idents.get("NS_RETURNS_RETAINED").hasMacroDefinition())
+      if (Ret.isOwned() && NSAPIObj->isMacroDefined("NS_RETURNS_RETAINED"))
         AnnotationString = " NS_RETURNS_RETAINED";
       break;
     }
@@ -1620,7 +1634,7 @@ void ObjCMigrateASTConsumer::AddCFAnnotations(ASTContext &Ctx,
     const ParmVarDecl *pd = *pi;
     ArgEffect AE = AEArgs[i];
     if (AE == DecRef && !pd->hasAttr<CFConsumedAttr>() &&
-        Ctx.Idents.get("CF_CONSUMED").hasMacroDefinition()) {
+        NSAPIObj->isMacroDefined("CF_CONSUMED")) {
       edit::Commit commit(*Editor);
       commit.insertBefore(pd->getLocation(), "CF_CONSUMED ");
       Editor->commit(commit);
@@ -1640,12 +1654,12 @@ void ObjCMigrateASTConsumer::migrateAddMethodAnnotation(
                        MethodDecl->hasAttr<NSReturnsRetainedAttr>() ||
                        MethodDecl->hasAttr<NSReturnsNotRetainedAttr>() ||
                        MethodDecl->hasAttr<NSReturnsAutoreleasedAttr>());
-  
-  if (CE.getReceiver() == DecRefMsg && 
+
+  if (CE.getReceiver() == DecRefMsg &&
       !MethodDecl->hasAttr<NSConsumesSelfAttr>() &&
       MethodDecl->getMethodFamily() != OMF_init &&
       MethodDecl->getMethodFamily() != OMF_release &&
-      Ctx.Idents.get("NS_CONSUMES_SELF").hasMacroDefinition()) {
+      NSAPIObj->isMacroDefined("NS_CONSUMES_SELF")) {
     edit::Commit commit(*Editor);
     commit.insertBefore(MethodDecl->getLocEnd(), " NS_CONSUMES_SELF");
     Editor->commit(commit);
@@ -1711,7 +1725,7 @@ void ObjCMigrateASTConsumer::inferDesignatedInitializers(
   const ObjCInterfaceDecl *IFace = ImplD->getClassInterface();
   if (!IFace || IFace->hasDesignatedInitializers())
     return;
-  if (!Ctx.Idents.get("NS_DESIGNATED_INITIALIZER").hasMacroDefinition())
+  if (!NSAPIObj->isMacroDefined("NS_DESIGNATED_INITIALIZER"))
     return;
 
   for (const auto *MD : ImplD->instance_methods()) {
@@ -1772,9 +1786,7 @@ public:
     : SourceMgr(SM), OS(OS) { OS << "[\n"; }
-  ~JSONEditWriter() {
-    OS << "]\n";
-  }
+  ~JSONEditWriter() override { OS << "]\n"; }
 
 private:
   struct EntryWriter {
@@ -1858,13 +1870,16 @@ void ObjCMigrateASTConsumer::HandleTranslationUnit(ASTContext &Ctx) {
       if (ObjCInterfaceDecl *CDecl = dyn_cast<ObjCInterfaceDecl>(*D))
         if (canModify(CDecl))
-          migrateObjCInterfaceDecl(Ctx, CDecl);
+          migrateObjCContainerDecl(Ctx, CDecl);
       if (ObjCCategoryDecl *CatDecl = dyn_cast<ObjCCategoryDecl>(*D)) {
         if (canModify(CatDecl))
-          migrateObjCInterfaceDecl(Ctx, CatDecl);
+          migrateObjCContainerDecl(Ctx, CatDecl);
       }
-      else if (ObjCProtocolDecl *PDecl = dyn_cast<ObjCProtocolDecl>(*D))
+      else if (ObjCProtocolDecl *PDecl = dyn_cast<ObjCProtocolDecl>(*D)) {
         ObjCProtocolDecls.insert(PDecl->getCanonicalDecl());
+        if (canModify(PDecl))
+          migrateObjCContainerDecl(Ctx, PDecl);
+      }
       else if (const ObjCImplementationDecl *ImpDecl =
                dyn_cast<ObjCImplementationDecl>(*D)) {
         if ((ASTMigrateActions & FrontendOptions::ObjCMT_ProtocolConformance) &&
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/PlistReporter.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/PlistReporter.cpp
index 53398b2..9a51690 100644
--- a/contrib/llvm/tools/clang/lib/ARCMigrate/PlistReporter.cpp
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/PlistReporter.cpp
@@ -100,16 +100,18 @@ void arcmt::writeARCDiagsToPlist(const std::string &outPath,
     // Output the location of the bug.
o << " <key>location</key>\n"; - EmitLocation(o, SM, LangOpts, D.getLocation(), FM, 2); + EmitLocation(o, SM, D.getLocation(), FM, 2); // Output the ranges (if any). - StoredDiagnostic::range_iterator RI = D.range_begin(), RE = D.range_end(); - - if (RI != RE) { + if (!D.getRanges().empty()) { o << " <key>ranges</key>\n"; o << " <array>\n"; - for (; RI != RE; ++RI) - EmitRange(o, SM, LangOpts, *RI, FM, 4); + for (auto &R : D.getRanges()) { + CharSourceRange ExpansionRange(SM.getExpansionRange(R.getAsRange()), + R.isTokenRange()); + EmitRange(o, SM, Lexer::getAsCharRange(ExpansionRange, SM, LangOpts), + FM, 4); + } o << " </array>\n"; } diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/TransAPIUses.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/TransAPIUses.cpp index 544cb0a..40c8a07 100644 --- a/contrib/llvm/tools/clang/lib/ARCMigrate/TransAPIUses.cpp +++ b/contrib/llvm/tools/clang/lib/ARCMigrate/TransAPIUses.cpp @@ -95,7 +95,7 @@ public: Pass.TA.clearDiagnostic(diag::err_unavailable, diag::err_unavailable_message, E->getSelectorLoc(0)); - Pass.TA.replace(E->getSourceRange(), getNilString(Pass.Ctx)); + Pass.TA.replace(E->getSourceRange(), getNilString(Pass)); } return true; } diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/TransRetainReleaseDealloc.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/TransRetainReleaseDealloc.cpp index bcbc9e9..7db1a1c 100644 --- a/contrib/llvm/tools/clang/lib/ARCMigrate/TransRetainReleaseDealloc.cpp +++ b/contrib/llvm/tools/clang/lib/ARCMigrate/TransRetainReleaseDealloc.cpp @@ -145,7 +145,7 @@ public: // when an exception is thrown. Pass.TA.replace(RecContainer->getSourceRange(), RecRange); std::string str = " = "; - str += getNilString(Pass.Ctx); + str += getNilString(Pass); Pass.TA.insertAfterToken(RecRange.getEnd(), str); return true; } diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/TransUnusedInitDelegate.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/TransUnusedInitDelegate.cpp index 98571c0..70370ec 100644 --- a/contrib/llvm/tools/clang/lib/ARCMigrate/TransUnusedInitDelegate.cpp +++ b/contrib/llvm/tools/clang/lib/ARCMigrate/TransUnusedInitDelegate.cpp @@ -58,7 +58,7 @@ public: SourceRange ExprRange = ME->getSourceRange(); Pass.TA.insert(ExprRange.getBegin(), "if (!(self = "); std::string retStr = ")) return "; - retStr += getNilString(Pass.Ctx); + retStr += getNilString(Pass); Pass.TA.insertAfterToken(ExprRange.getEnd(), retStr); } return true; diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/Transforms.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/Transforms.cpp index 6ff7b6b..56d3af7 100644 --- a/contrib/llvm/tools/clang/lib/ARCMigrate/Transforms.cpp +++ b/contrib/llvm/tools/clang/lib/ARCMigrate/Transforms.cpp @@ -16,6 +16,7 @@ #include "clang/Basic/SourceManager.h" #include "clang/Basic/TargetInfo.h" #include "clang/Lex/Lexer.h" +#include "clang/Lex/Preprocessor.h" #include "clang/Sema/Sema.h" #include "clang/Sema/SemaDiagnostic.h" #include "llvm/ADT/DenseSet.h" @@ -212,11 +213,8 @@ bool trans::isGlobalVar(Expr *E) { return false; } -StringRef trans::getNilString(ASTContext &Ctx) { - if (Ctx.Idents.get("nil").hasMacroDefinition()) - return "nil"; - else - return "0"; +StringRef trans::getNilString(MigrationPass &Pass) { + return Pass.SemaRef.PP.isMacroDefined("nil") ? 
"nil" : "0"; } namespace { diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/Transforms.h b/contrib/llvm/tools/clang/lib/ARCMigrate/Transforms.h index 12551d2..7e3dd34 100644 --- a/contrib/llvm/tools/clang/lib/ARCMigrate/Transforms.h +++ b/contrib/llvm/tools/clang/lib/ARCMigrate/Transforms.h @@ -180,7 +180,7 @@ SourceLocation findSemiAfterLocation(SourceLocation loc, ASTContext &Ctx, bool hasSideEffects(Expr *E, ASTContext &Ctx); bool isGlobalVar(Expr *E); /// \brief Returns "nil" or "0" if 'nil' macro is not actually defined. -StringRef getNilString(ASTContext &Ctx); +StringRef getNilString(MigrationPass &Pass); template <typename BODY_TRANS> class BodyTransform : public RecursiveASTVisitor<BodyTransform<BODY_TRANS> > { diff --git a/contrib/llvm/tools/clang/lib/AST/ASTContext.cpp b/contrib/llvm/tools/clang/lib/AST/ASTContext.cpp index c9fb80c..4a831d9 100644 --- a/contrib/llvm/tools/clang/lib/AST/ASTContext.cpp +++ b/contrib/llvm/tools/clang/lib/AST/ASTContext.cpp @@ -738,9 +738,9 @@ ASTContext::ASTContext(LangOptions &LOpts, SourceManager &SM, FILEDecl(nullptr), jmp_bufDecl(nullptr), sigjmp_bufDecl(nullptr), ucontext_tDecl(nullptr), BlockDescriptorType(nullptr), BlockDescriptorExtendedType(nullptr), cudaConfigureCallDecl(nullptr), - FirstLocalImport(), LastLocalImport(), + FirstLocalImport(), LastLocalImport(), ExternCContext(nullptr), SourceMgr(SM), LangOpts(LOpts), - SanitizerBL(new SanitizerBlacklist(LangOpts.SanitizerBlacklistFile, SM)), + SanitizerBL(new SanitizerBlacklist(LangOpts.SanitizerBlacklistFiles, SM)), AddrSpaceMap(nullptr), Target(nullptr), PrintingPolicy(LOpts), Idents(idents), Selectors(sels), BuiltinInfo(builtins), DeclarationNames(*this), ExternalSource(nullptr), Listener(nullptr), @@ -866,6 +866,38 @@ void ASTContext::PrintStats() const { BumpAlloc.PrintStats(); } +void ASTContext::mergeDefinitionIntoModule(NamedDecl *ND, Module *M, + bool NotifyListeners) { + if (NotifyListeners) + if (auto *Listener = getASTMutationListener()) + Listener->RedefinedHiddenDefinition(ND, M); + + if (getLangOpts().ModulesLocalVisibility) + MergedDefModules[ND].push_back(M); + else + ND->setHidden(false); +} + +void ASTContext::deduplicateMergedDefinitonsFor(NamedDecl *ND) { + auto It = MergedDefModules.find(ND); + if (It == MergedDefModules.end()) + return; + + auto &Merged = It->second; + llvm::DenseSet<Module*> Found; + for (Module *&M : Merged) + if (!Found.insert(M).second) + M = nullptr; + Merged.erase(std::remove(Merged.begin(), Merged.end(), nullptr), Merged.end()); +} + +ExternCContextDecl *ASTContext::getExternCContextDecl() const { + if (!ExternCContext) + ExternCContext = ExternCContextDecl::Create(*this, getTranslationUnitDecl()); + + return ExternCContext; +} + RecordDecl *ASTContext::buildImplicitRecord(StringRef Name, RecordDecl::TagKind TK) const { SourceLocation Loc; @@ -877,6 +909,8 @@ RecordDecl *ASTContext::buildImplicitRecord(StringRef Name, NewDecl = RecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc, Loc, &Idents.get(Name)); NewDecl->setImplicit(); + NewDecl->addAttr(TypeVisibilityAttr::CreateImplicit( + const_cast<ASTContext &>(*this), TypeVisibilityAttr::Default)); return NewDecl; } @@ -1326,7 +1360,7 @@ CharUnits ASTContext::getDeclAlign(const Decl *D, bool ForAlignof) const { } Align = std::max(Align, getPreferredTypeAlign(T.getTypePtr())); if (const VarDecl *VD = dyn_cast<VarDecl>(D)) { - if (VD->hasGlobalStorage()) + if (VD->hasGlobalStorage() && !ForAlignof) Align = std::max(Align, getTargetInfo().getMinGlobalAlign()); } } @@ -1669,13 +1703,23 
@@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const { break; } - if (const EnumType *ET = dyn_cast<EnumType>(TT)) - return getTypeInfo(ET->getDecl()->getIntegerType()); + if (const EnumType *ET = dyn_cast<EnumType>(TT)) { + const EnumDecl *ED = ET->getDecl(); + TypeInfo Info = + getTypeInfo(ED->getIntegerType()->getUnqualifiedDesugaredType()); + if (unsigned AttrAlign = ED->getMaxAlignment()) { + Info.Align = AttrAlign; + Info.AlignIsRequired = true; + } + return Info; + } const RecordType *RT = cast<RecordType>(TT); - const ASTRecordLayout &Layout = getASTRecordLayout(RT->getDecl()); + const RecordDecl *RD = RT->getDecl(); + const ASTRecordLayout &Layout = getASTRecordLayout(RD); Width = toBits(Layout.getSize()); Align = toBits(Layout.getAlignment()); + AlignIsRequired = RD->hasAttr<AlignedAttr>(); break; } @@ -1778,13 +1822,20 @@ unsigned ASTContext::getPreferredTypeAlign(const Type *T) const { TypeInfo TI = getTypeInfo(T); unsigned ABIAlign = TI.Align; + T = T->getBaseElementTypeUnsafe(); + + // The preferred alignment of member pointers is that of a pointer. + if (T->isMemberPointerType()) + return getPreferredTypeAlign(getPointerDiffType().getTypePtr()); + if (Target->getTriple().getArch() == llvm::Triple::xcore) return ABIAlign; // Never overalign on XCore. // Double and long long should be naturally aligned if possible. - T = T->getBaseElementTypeUnsafe(); if (const ComplexType *CT = T->getAs<ComplexType>()) T = CT->getElementType().getTypePtr(); + if (const EnumType *ET = T->getAs<EnumType>()) + T = ET->getDecl()->getIntegerType().getTypePtr(); if (T->isSpecificBuiltinType(BuiltinType::Double) || T->isSpecificBuiltinType(BuiltinType::LongLong) || T->isSpecificBuiltinType(BuiltinType::ULongLong)) @@ -1796,6 +1847,13 @@ unsigned ASTContext::getPreferredTypeAlign(const Type *T) const { return ABIAlign; } +/// getTargetDefaultAlignForAttributeAligned - Return the default alignment +/// for __attribute__((aligned)) on this target, to be used if no alignment +/// value is specified. +unsigned ASTContext::getTargetDefaultAlignForAttributeAligned(void) const { + return getTargetInfo().getDefaultAlignForAttributeAligned(); +} + /// getAlignOfGlobalVar - Return the alignment in bits that should be given /// to a global variable of the specified type. 
 unsigned ASTContext::getAlignOfGlobalVar(QualType T) const {
@@ -3337,7 +3395,7 @@ ASTContext::getElaboratedType(ElaboratedTypeKeyword Keyword,
     (void)CheckT;
   }
 
-  T = new (*this) ElaboratedType(Keyword, NNS, NamedType, Canon);
+  T = new (*this, TypeAlignment) ElaboratedType(Keyword, NNS, NamedType, Canon);
   Types.push_back(T);
   ElaboratedTypes.InsertNode(T, InsertPos);
   return QualType(T, 0);
@@ -3361,7 +3419,7 @@ ASTContext::getParenType(QualType InnerType) const {
     (void)CheckT;
   }
 
-  T = new (*this) ParenType(InnerType, Canon);
+  T = new (*this, TypeAlignment) ParenType(InnerType, Canon);
   Types.push_back(T);
   ParenTypes.InsertNode(T, InsertPos);
   return QualType(T, 0);
@@ -3390,7 +3448,7 @@ QualType ASTContext::getDependentNameType(ElaboratedTypeKeyword Keyword,
   if (T)
     return QualType(T, 0);
 
-  T = new (*this) DependentNameType(Keyword, NNS, Name, Canon);
+  T = new (*this, TypeAlignment) DependentNameType(Keyword, NNS, Name, Canon);
   Types.push_back(T);
   DependentNameTypes.InsertNode(T, InsertPos);
   return QualType(T, 0);
@@ -3492,7 +3550,8 @@ QualType ASTContext::getPackExpansionType(QualType Pattern,
     }
   }
 
-  T = new (*this) PackExpansionType(Pattern, Canon, NumExpansions);
+  T = new (*this, TypeAlignment)
+      PackExpansionType(Pattern, Canon, NumExpansions);
   Types.push_back(T);
   PackExpansionTypes.InsertNode(T, InsertPos);
   return QualType(T, 0);
@@ -3500,9 +3559,9 @@ QualType ASTContext::getPackExpansionType(QualType Pattern,
 /// CmpProtocolNames - Comparison predicate for sorting protocols
 /// alphabetically.
-static bool CmpProtocolNames(const ObjCProtocolDecl *LHS,
-                             const ObjCProtocolDecl *RHS) {
-  return LHS->getDeclName() < RHS->getDeclName();
+static int CmpProtocolNames(ObjCProtocolDecl *const *LHS,
+                            ObjCProtocolDecl *const *RHS) {
+  return DeclarationName::compare((*LHS)->getDeclName(), (*RHS)->getDeclName());
 }
 
 static bool areSortedAndUniqued(ObjCProtocolDecl * const *Protocols,
@@ -3513,7 +3572,7 @@ static bool areSortedAndUniqued(ObjCProtocolDecl * const *Protocols,
     return false;
 
   for (unsigned i = 1; i != NumProtocols; ++i)
-    if (!CmpProtocolNames(Protocols[i-1], Protocols[i]) ||
+    if (CmpProtocolNames(&Protocols[i - 1], &Protocols[i]) >= 0 ||
         Protocols[i]->getCanonicalDecl() != Protocols[i])
       return false;
   return true;
@@ -3524,7 +3583,7 @@ static void SortAndUniqueProtocols(ObjCProtocolDecl **Protocols,
   ObjCProtocolDecl **ProtocolsEnd = Protocols+NumProtocols;
 
   // Sort protocols, keyed by name.
-  std::sort(Protocols, Protocols+NumProtocols, CmpProtocolNames);
+  llvm::array_pod_sort(Protocols, ProtocolsEnd, CmpProtocolNames);
 
   // Canonicalize.
   for (unsigned I = 0, N = NumProtocols; I != N; ++I)
@@ -4325,6 +4384,19 @@ QualType ASTContext::getSignatureParameterType(QualType T) const {
   return T.getUnqualifiedType();
 }
 
+QualType ASTContext::getExceptionObjectType(QualType T) const {
+  // C++ [except.throw]p3:
+  //   A throw-expression initializes a temporary object, called the exception
+  //   object, the type of which is determined by removing any top-level
+  //   cv-qualifiers from the static type of the operand of throw and adjusting
+  //   the type from "array of T" or "function returning T" to "pointer to T"
+  //   or "pointer to function returning T", [...]
+  T = getVariableArrayDecayedType(T);
+  if (T->isArrayType() || T->isFunctionType())
+    T = getDecayedType(T);
+  return T.getUnqualifiedType();
+}
+
 /// getArrayDecayedType - Return the properly qualified result of decaying the
 /// specified array type to a pointer. This operation is non-trivial when
 /// handling typedefs etc. The canonical type of "T" must be an array type,
@@ -4859,7 +4931,7 @@ CharUnits ASTContext::getObjCEncodingTypeSize(QualType type) const {
 bool ASTContext::isMSStaticDataMemberInlineDefinition(const VarDecl *VD) const {
   return getLangOpts().MSVCCompat && VD->isStaticDataMember() &&
          VD->getType()->isIntegralOrEnumerationType() &&
-         !VD->getFirstDecl()->isOutOfLine() && VD->getFirstDecl()->hasInit();
+         VD->isFirstDecl() && !VD->isOutOfLine() && VD->hasInit();
 }
 
 static inline
@@ -7544,7 +7616,7 @@ static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context,
     break;
   case 'U':
     assert(!Signed && "Can't use both 'S' and 'U' modifiers!");
-    assert(!Unsigned && "Can't use 'S' modifier multiple times!");
+    assert(!Unsigned && "Can't use 'U' modifier multiple times!");
     Unsigned = true;
     break;
   case 'L':
@@ -7579,7 +7651,7 @@ static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context,
     break;
   case 'h':
     assert(HowLong == 0 && !Signed && !Unsigned &&
-           "Bad modifiers used with 'f'!");
+           "Bad modifiers used with 'h'!");
     Type = Context.HalfTy;
     break;
   case 'f':
@@ -7806,6 +7878,9 @@ QualType ASTContext::GetBuiltinType(unsigned Id,
     ArgTypes.push_back(Ty);
   }
 
+  if (Id == Builtin::BI__GetExceptionInfo)
+    return QualType();
+
   assert((TypeStr[0] != '.' || TypeStr[1] == 0) &&
          "'.' should only occur at end of builtin type list!");
 
@@ -7909,7 +7984,7 @@ static GVALinkage basicGVALinkageForVariable(const ASTContext &Context,
     while (LexicalContext && !isa<FunctionDecl>(LexicalContext))
       LexicalContext = LexicalContext->getLexicalParent();
 
-    // Let the static local variable inherit it's linkage from the nearest
+    // Let the static local variable inherit its linkage from the nearest
     // enclosing function.
     if (LexicalContext)
       StaticLocalLinkage =
@@ -8042,7 +8117,9 @@ CallingConv ASTContext::getDefaultCallingConvention(bool IsVariadic,
   if (IsCXXMethod)
     return ABI->getDefaultMethodCallConv(IsVariadic);
 
-  return (LangOpts.MRTD && !IsVariadic) ? CC_X86StdCall : CC_C;
+  if (LangOpts.MRTD && !IsVariadic) return CC_X86StdCall;
+
+  return Target->getDefaultCallingConv(TargetInfo::CCMT_Unknown);
 }
 
 bool ASTContext::isNearlyEmpty(const CXXRecordDecl *RD) const {
@@ -8161,6 +8238,31 @@ MangleNumberingContext *ASTContext::createMangleNumberingContext() const {
   return ABI->createMangleNumberingContext();
 }
 
+const CXXConstructorDecl *
+ASTContext::getCopyConstructorForExceptionObject(CXXRecordDecl *RD) {
+  return ABI->getCopyConstructorForExceptionObject(
+      cast<CXXRecordDecl>(RD->getFirstDecl()));
+}
+
+void ASTContext::addCopyConstructorForExceptionObject(CXXRecordDecl *RD,
+                                                      CXXConstructorDecl *CD) {
+  return ABI->addCopyConstructorForExceptionObject(
+      cast<CXXRecordDecl>(RD->getFirstDecl()),
+      cast<CXXConstructorDecl>(CD->getFirstDecl()));
+}
+
+void ASTContext::addDefaultArgExprForConstructor(const CXXConstructorDecl *CD,
+                                                 unsigned ParmIdx, Expr *DAE) {
+  ABI->addDefaultArgExprForConstructor(
+      cast<CXXConstructorDecl>(CD->getFirstDecl()), ParmIdx, DAE);
+}
+
+Expr *ASTContext::getDefaultArgExprForConstructor(const CXXConstructorDecl *CD,
+                                                  unsigned ParmIdx) {
+  return ABI->getDefaultArgExprForConstructor(
+      cast<CXXConstructorDecl>(CD->getFirstDecl()), ParmIdx);
+}
+
 void ASTContext::setParameterIndex(const ParmVarDecl *D, unsigned int index) {
   ParamIndices[D] = index;
 }
diff --git a/contrib/llvm/tools/clang/lib/AST/ASTDumper.cpp b/contrib/llvm/tools/clang/lib/AST/ASTDumper.cpp
index ebf5e65..60cbb06 100644
--- a/contrib/llvm/tools/clang/lib/AST/ASTDumper.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/ASTDumper.cpp
@@ -508,6 +508,8 @@ namespace {
     void VisitCXXFunctionalCastExpr(const CXXFunctionalCastExpr *Node);
     void VisitCXXConstructExpr(const CXXConstructExpr *Node);
     void VisitCXXBindTemporaryExpr(const CXXBindTemporaryExpr *Node);
+    void VisitCXXNewExpr(const CXXNewExpr *Node);
+    void VisitCXXDeleteExpr(const CXXDeleteExpr *Node);
     void VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *Node);
     void VisitExprWithCleanups(const ExprWithCleanups *Node);
     void VisitUnresolvedLookupExpr(const UnresolvedLookupExpr *Node);
@@ -516,6 +518,7 @@ namespace {
       VisitExpr(Node);
      dumpDecl(Node->getLambdaClass());
     }
+    void VisitSizeOfPackExpr(const SizeOfPackExpr *Node);
 
     // ObjC
     void VisitObjCAtCatchStmt(const ObjCAtCatchStmt *Node);
@@ -974,8 +977,10 @@ void ASTDumper::dumpDecl(const Decl *D) {
   dumpSourceRange(D->getSourceRange());
   OS << ' ';
   dumpLocation(D->getLocation());
-  if (Module *M = D->getOwningModule())
+  if (Module *M = D->getImportedOwningModule())
     OS << " in " << M->getFullModuleName();
+  else if (Module *M = D->getLocalOwningModule())
+    OS << " in (local) " << M->getFullModuleName();
   if (const NamedDecl *ND = dyn_cast<NamedDecl>(D))
     if (ND->isHidden())
       OS << " hidden";
@@ -1099,10 +1104,13 @@ void ASTDumper::VisitFunctionDecl(const FunctionDecl *D) {
          E = D->getDeclsInPrototypeScope().end();
        I != E; ++I)
     dumpDecl(*I);
 
-  for (FunctionDecl::param_const_iterator I = D->param_begin(),
-       E = D->param_end();
-       I != E; ++I)
-    dumpDecl(*I);
+  if (!D->param_begin() && D->getNumParams())
+    dumpChild([=] { OS << "<<NULL params x " << D->getNumParams() << ">>"; });
+  else
+    for (FunctionDecl::param_const_iterator I = D->param_begin(),
+                                            E = D->param_end();
+         I != E; ++I)
+      dumpDecl(*I);
 
   if (const CXXConstructorDecl *C = dyn_cast<CXXConstructorDecl>(D))
     for (CXXConstructorDecl::init_const_iterator I = C->init_begin(),
@@ -1913,6 +1921,32 @@ void ASTDumper::VisitCXXBindTemporaryExpr(const CXXBindTemporaryExpr *Node) {
   dumpCXXTemporary(Node->getTemporary());
 }
 
+void ASTDumper::VisitCXXNewExpr(const CXXNewExpr *Node) {
+  VisitExpr(Node);
+  if (Node->isGlobalNew())
+    OS << " global";
+  if (Node->isArray())
+    OS << " array";
+  if (Node->getOperatorNew()) {
+    OS << ' ';
+    dumpBareDeclRef(Node->getOperatorNew());
+  }
+  // We could dump the deallocation function used in case of error, but it's
+  // usually not that interesting.
+}
+
+void ASTDumper::VisitCXXDeleteExpr(const CXXDeleteExpr *Node) {
+  VisitExpr(Node);
+  if (Node->isGlobalDelete())
+    OS << " global";
+  if (Node->isArrayForm())
+    OS << " array";
+  if (Node->getOperatorDelete()) {
+    OS << ' ';
+    dumpBareDeclRef(Node->getOperatorDelete());
+  }
+}
+
 void
 ASTDumper::VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *Node) {
   VisitExpr(Node);
@@ -1934,6 +1968,13 @@ void ASTDumper::dumpCXXTemporary(const CXXTemporary *Temporary) {
   OS << ")";
 }
 
+void ASTDumper::VisitSizeOfPackExpr(const SizeOfPackExpr *Node) {
+  VisitExpr(Node);
+  dumpPointer(Node->getPack());
+  dumpName(Node->getPack());
+}
+
+
 //===----------------------------------------------------------------------===//
 // Obj-C Expressions
 //===----------------------------------------------------------------------===//
@@ -2255,6 +2296,11 @@ LLVM_DUMP_METHOD void Stmt::dump(raw_ostream &OS, SourceManager &SM) const {
   P.dumpStmt(this);
 }
 
+LLVM_DUMP_METHOD void Stmt::dump(raw_ostream &OS) const {
+  ASTDumper P(OS, nullptr, nullptr);
+  P.dumpStmt(this);
+}
+
 LLVM_DUMP_METHOD void Stmt::dump() const {
   ASTDumper P(llvm::errs(), nullptr, nullptr);
   P.dumpStmt(this);
diff --git a/contrib/llvm/tools/clang/lib/AST/ASTImporter.cpp b/contrib/llvm/tools/clang/lib/AST/ASTImporter.cpp
index 2442e8e..911f168 100644
--- a/contrib/llvm/tools/clang/lib/AST/ASTImporter.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/ASTImporter.cpp
@@ -67,6 +67,7 @@ namespace clang {
     // FIXME: DependentDecltypeType
     QualType VisitRecordType(const RecordType *T);
     QualType VisitEnumType(const EnumType *T);
+    QualType VisitAttributedType(const AttributedType *T);
     // FIXME: TemplateTypeParmType
     // FIXME: SubstTemplateTypeParmType
     QualType VisitTemplateSpecializationType(const TemplateSpecializationType *T);
@@ -80,7 +81,7 @@ namespace clang {
     // Importing declarations
     bool ImportDeclParts(NamedDecl *D, DeclContext *&DC,
                          DeclContext *&LexicalDC, DeclarationName &Name,
-                         SourceLocation &Loc);
+                         NamedDecl *&ToD, SourceLocation &Loc);
     void ImportDefinitionIfNeeded(Decl *FromD, Decl *ToD = nullptr);
     void ImportDeclarationNameLoc(const DeclarationNameInfo &From,
                                   DeclarationNameInfo& To);
@@ -167,7 +168,44 @@ namespace clang {
     Decl *VisitVarTemplateSpecializationDecl(VarTemplateSpecializationDecl *D);
 
     // Importing statements
+    DeclGroupRef ImportDeclGroup(DeclGroupRef DG);
+
     Stmt *VisitStmt(Stmt *S);
+    Stmt *VisitDeclStmt(DeclStmt *S);
+    Stmt *VisitNullStmt(NullStmt *S);
+    Stmt *VisitCompoundStmt(CompoundStmt *S);
+    Stmt *VisitCaseStmt(CaseStmt *S);
+    Stmt *VisitDefaultStmt(DefaultStmt *S);
+    Stmt *VisitLabelStmt(LabelStmt *S);
+    Stmt *VisitAttributedStmt(AttributedStmt *S);
+    Stmt *VisitIfStmt(IfStmt *S);
+    Stmt *VisitSwitchStmt(SwitchStmt *S);
+    Stmt *VisitWhileStmt(WhileStmt *S);
+    Stmt *VisitDoStmt(DoStmt *S);
+    Stmt *VisitForStmt(ForStmt *S);
+    Stmt *VisitGotoStmt(GotoStmt *S);
+    Stmt *VisitIndirectGotoStmt(IndirectGotoStmt *S);
+    Stmt *VisitContinueStmt(ContinueStmt *S);
+    Stmt *VisitBreakStmt(BreakStmt *S);
+    Stmt *VisitReturnStmt(ReturnStmt *S);
+    // FIXME: GCCAsmStmt
+    // FIXME: MSAsmStmt
+    // FIXME: SEHExceptStmt
+    // FIXME: SEHFinallyStmt
+    // FIXME: SEHTryStmt
+    // FIXME: SEHLeaveStmt
+    // FIXME: CapturedStmt
+    Stmt *VisitCXXCatchStmt(CXXCatchStmt *S);
+    Stmt *VisitCXXTryStmt(CXXTryStmt *S);
+    Stmt *VisitCXXForRangeStmt(CXXForRangeStmt *S);
+    // FIXME: MSDependentExistsStmt
+    Stmt *VisitObjCForCollectionStmt(ObjCForCollectionStmt *S);
+    Stmt *VisitObjCAtCatchStmt(ObjCAtCatchStmt *S);
+    Stmt *VisitObjCAtFinallyStmt(ObjCAtFinallyStmt *S);
+    Stmt *VisitObjCAtTryStmt(ObjCAtTryStmt *S);
+    Stmt *VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt *S);
+    Stmt *VisitObjCAtThrowStmt(ObjCAtThrowStmt *S);
+    Stmt *VisitObjCAutoreleasePoolStmt(ObjCAutoreleasePoolStmt *S);
 
     // Importing expressions
     Expr *VisitExpr(Expr *E);
@@ -181,6 +219,9 @@ namespace clang {
     Expr *VisitCompoundAssignOperator(CompoundAssignOperator *E);
     Expr *VisitImplicitCastExpr(ImplicitCastExpr *E);
     Expr *VisitCStyleCastExpr(CStyleCastExpr *E);
+    Expr *VisitCXXConstructExpr(CXXConstructExpr *E);
+    Expr *VisitMemberExpr(MemberExpr *E);
+    Expr *VisitCallExpr(CallExpr *E);
   };
 }
 using namespace clang;
@@ -1724,6 +1765,27 @@ QualType ASTNodeImporter::VisitEnumType(const EnumType *T) {
   return Importer.getToContext().getTagDeclType(ToDecl);
 }
 
+QualType ASTNodeImporter::VisitAttributedType(const AttributedType *T) {
+  QualType FromModifiedType = T->getModifiedType();
+  QualType FromEquivalentType = T->getEquivalentType();
+  QualType ToModifiedType;
+  QualType ToEquivalentType;
+
+  if (!FromModifiedType.isNull()) {
+    ToModifiedType = Importer.Import(FromModifiedType);
+    if (ToModifiedType.isNull())
+      return QualType();
+  }
+  if (!FromEquivalentType.isNull()) {
+    ToEquivalentType = Importer.Import(FromEquivalentType);
+    if (ToEquivalentType.isNull())
+      return QualType();
+  }
+
+  return Importer.getToContext().getAttributedType(T->getAttrKind(),
+      ToModifiedType, ToEquivalentType);
+}
+
 QualType ASTNodeImporter::VisitTemplateSpecializationType(
                                        const TemplateSpecializationType *T) {
   TemplateName ToTemplate = Importer.Import(T->getTemplateName());
@@ -1808,6 +1870,7 @@ ASTNodeImporter::VisitObjCObjectPointerType(const ObjCObjectPointerType *T) {
 bool ASTNodeImporter::ImportDeclParts(NamedDecl *D, DeclContext *&DC,
                                       DeclContext *&LexicalDC,
                                       DeclarationName &Name,
+                                      NamedDecl *&ToD,
                                       SourceLocation &Loc) {
   // Import the context of this declaration.
   DC = Importer.ImportContext(D->getDeclContext());
@@ -1828,6 +1891,7 @@ bool ASTNodeImporter::ImportDeclParts(NamedDecl *D, DeclContext *&DC,
 
   // Import the location of this declaration.
   Loc = Importer.Import(D->getLocation());
+  ToD = cast_or_null<NamedDecl>(Importer.GetAlreadyImportedOrNull(D));
   return false;
 }
 
@@ -2009,7 +2073,7 @@ bool ASTNodeImporter::ImportDefinition(RecordDecl *From, RecordDecl *To,
 
 bool ASTNodeImporter::ImportDefinition(VarDecl *From, VarDecl *To,
                                        ImportDefinitionKind Kind) {
-  if (To->getDefinition())
+  if (To->getAnyInitializer())
     return false;
 
   // FIXME: Can we really import any initializer? Alternatively, we could force
@@ -2239,8 +2303,11 @@ Decl *ASTNodeImporter::VisitNamespaceDecl(NamespaceDecl *D) {
   DeclContext *DC, *LexicalDC;
   DeclarationName Name;
   SourceLocation Loc;
-  if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
+  NamedDecl *ToD;
+  if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
     return nullptr;
+  if (ToD)
+    return ToD;
 
   NamespaceDecl *MergeWithNamespace = nullptr;
   if (!Name) {
@@ -2307,8 +2374,11 @@ Decl *ASTNodeImporter::VisitTypedefNameDecl(TypedefNameDecl *D, bool IsAlias) {
   DeclContext *DC, *LexicalDC;
   DeclarationName Name;
   SourceLocation Loc;
-  if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
+  NamedDecl *ToD;
+  if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
     return nullptr;
+  if (ToD)
+    return ToD;
 
   // If this typedef is not in block scope, determine whether we've
   // seen a typedef with the same name (that we can merge with) or any
@@ -2381,8 +2451,11 @@ Decl *ASTNodeImporter::VisitEnumDecl(EnumDecl *D) {
   DeclContext *DC, *LexicalDC;
   DeclarationName Name;
   SourceLocation Loc;
-  if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
+  NamedDecl *ToD;
+  if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
     return nullptr;
+  if (ToD)
+    return ToD;
 
   // Figure out what enum name we're looking for.
   unsigned IDNS = Decl::IDNS_Tag;
@@ -2466,8 +2539,11 @@ Decl *ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
   DeclContext *DC, *LexicalDC;
   DeclarationName Name;
   SourceLocation Loc;
-  if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
+  NamedDecl *ToD;
+  if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
     return nullptr;
+  if (ToD)
+    return ToD;
 
   // Figure out what structure name we're looking for.
   unsigned IDNS = Decl::IDNS_Tag;
@@ -2592,8 +2668,11 @@ Decl *ASTNodeImporter::VisitEnumConstantDecl(EnumConstantDecl *D) {
   DeclContext *DC, *LexicalDC;
   DeclarationName Name;
   SourceLocation Loc;
-  if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
+  NamedDecl *ToD;
+  if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
     return nullptr;
+  if (ToD)
+    return ToD;
 
   QualType T = Importer.Import(D->getType());
   if (T.isNull())
@@ -2648,8 +2727,11 @@ Decl *ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
   DeclContext *DC, *LexicalDC;
   DeclarationName Name;
   SourceLocation Loc;
-  if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
+  NamedDecl *ToD;
+  if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
     return nullptr;
+  if (ToD)
+    return ToD;
 
   // Try to find a function in our own ("to") context with the same name, same
   // type, and in the same context as the function we're importing.
@@ -2741,10 +2823,11 @@ Decl *ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
   // Create the imported function.
   TypeSourceInfo *TInfo = Importer.Import(D->getTypeSourceInfo());
   FunctionDecl *ToFunction = nullptr;
+  SourceLocation InnerLocStart = Importer.Import(D->getInnerLocStart());
   if (CXXConstructorDecl *FromConstructor = dyn_cast<CXXConstructorDecl>(D)) {
     ToFunction = CXXConstructorDecl::Create(Importer.getToContext(),
                                             cast<CXXRecordDecl>(DC),
-                                            D->getInnerLocStart(),
+                                            InnerLocStart,
                                             NameInfo, T, TInfo,
                                             FromConstructor->isExplicit(),
                                             D->isInlineSpecified(),
@@ -2753,7 +2836,7 @@ Decl *ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
   } else if (isa<CXXDestructorDecl>(D)) {
     ToFunction = CXXDestructorDecl::Create(Importer.getToContext(),
                                            cast<CXXRecordDecl>(DC),
-                                           D->getInnerLocStart(),
+                                           InnerLocStart,
                                            NameInfo, T, TInfo,
                                            D->isInlineSpecified(),
                                            D->isImplicit());
@@ -2761,7 +2844,7 @@ Decl *ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
                                            = dyn_cast<CXXConversionDecl>(D)) {
     ToFunction = CXXConversionDecl::Create(Importer.getToContext(),
                                            cast<CXXRecordDecl>(DC),
-                                           D->getInnerLocStart(),
+                                           InnerLocStart,
                                            NameInfo, T, TInfo,
                                            D->isInlineSpecified(),
                                            FromConversion->isExplicit(),
@@ -2770,7 +2853,7 @@ Decl *ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
   } else if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D)) {
     ToFunction = CXXMethodDecl::Create(Importer.getToContext(),
                                        cast<CXXRecordDecl>(DC),
-                                       D->getInnerLocStart(),
+                                       InnerLocStart,
                                        NameInfo, T, TInfo,
                                        Method->getStorageClass(),
                                        Method->isInlineSpecified(),
@@ -2778,7 +2861,7 @@ Decl *ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
                                        Importer.Import(D->getLocEnd()));
   } else {
     ToFunction = FunctionDecl::Create(Importer.getToContext(), DC,
-                                      D->getInnerLocStart(),
+                                      InnerLocStart,
                                       NameInfo, T, TInfo, D->getStorageClass(),
                                       D->isInlineSpecified(),
                                       D->hasWrittenPrototype(),
@@ -2809,6 +2892,13 @@ Decl *ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
     ToFunction->setType(T);
   }
 
+  // Import the body, if any.
+  if (Stmt *FromBody = D->getBody()) {
+    if (Stmt *ToBody = Importer.Import(FromBody)) {
+      ToFunction->setBody(ToBody);
+    }
+  }
+
   // FIXME: Other bits to merge?
 
   // Add this function to the lexical context.
@@ -2855,8 +2945,11 @@ Decl *ASTNodeImporter::VisitFieldDecl(FieldDecl *D) {
   DeclContext *DC, *LexicalDC;
   DeclarationName Name;
   SourceLocation Loc;
-  if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
+  NamedDecl *ToD;
+  if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
    return nullptr;
+  if (ToD)
+    return ToD;
 
   // Determine whether we've already imported this field.
   SmallVector<NamedDecl *, 2> FoundDecls;
@@ -2911,8 +3004,11 @@ Decl *ASTNodeImporter::VisitIndirectFieldDecl(IndirectFieldDecl *D) {
   DeclContext *DC, *LexicalDC;
   DeclarationName Name;
   SourceLocation Loc;
-  if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
+  NamedDecl *ToD;
+  if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
    return nullptr;
+  if (ToD)
+    return ToD;
 
   // Determine whether we've already imported this field.
   SmallVector<NamedDecl *, 2> FoundDecls;
@@ -2978,8 +3074,11 @@ Decl *ASTNodeImporter::VisitObjCIvarDecl(ObjCIvarDecl *D) {
   DeclContext *DC, *LexicalDC;
   DeclarationName Name;
   SourceLocation Loc;
-  if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
+  NamedDecl *ToD;
+  if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
     return nullptr;
+  if (ToD)
+    return ToD;
 
   // Determine whether we've already imported this ivar
   SmallVector<NamedDecl *, 2> FoundDecls;
@@ -3028,8 +3127,11 @@ Decl *ASTNodeImporter::VisitVarDecl(VarDecl *D) {
   DeclContext *DC, *LexicalDC;
   DeclarationName Name;
   SourceLocation Loc;
-  if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
+  NamedDecl *ToD;
+  if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
     return nullptr;
+  if (ToD)
+    return ToD;
 
   // Try to find a variable in our own ("to") context with the same name and
   // in the same context as the variable we're importing.
@@ -3137,6 +3239,10 @@ Decl *ASTNodeImporter::VisitVarDecl(VarDecl *D) {
   Importer.Imported(D, ToVar);
   LexicalDC->addDeclInternal(ToVar);
 
+  if (!D->isFileVarDecl() &&
+      D->isUsed())
+    ToVar->setIsUsed();
+
   // Merge the initializer.
   if (ImportDefinition(D, ToVar))
     return nullptr;
@@ -3196,6 +3302,10 @@ Decl *ASTNodeImporter::VisitParmVarDecl(ParmVarDecl *D) {
                                      T, TInfo, D->getStorageClass(),
                                      /*FIXME: Default argument*/nullptr);
   ToParm->setHasInheritedDefaultArg(D->hasInheritedDefaultArg());
+
+  if (D->isUsed())
+    ToParm->setIsUsed();
+
   return Importer.Imported(D, ToParm);
 }
 
@@ -3204,8 +3314,11 @@ Decl *ASTNodeImporter::VisitObjCMethodDecl(ObjCMethodDecl *D) {
   DeclContext *DC, *LexicalDC;
   DeclarationName Name;
   SourceLocation Loc;
-  if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
+  NamedDecl *ToD;
+  if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
     return nullptr;
+  if (ToD)
+    return ToD;
 
   SmallVector<NamedDecl *, 2> FoundDecls;
   DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
@@ -3315,8 +3428,11 @@ Decl *ASTNodeImporter::VisitObjCCategoryDecl(ObjCCategoryDecl *D) {
   DeclContext *DC, *LexicalDC;
   DeclarationName Name;
   SourceLocation Loc;
-  if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
+  NamedDecl *ToD;
+  if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
     return nullptr;
+  if (ToD)
+    return ToD;
 
   ObjCInterfaceDecl *ToInterface
     = cast_or_null<ObjCInterfaceDecl>(Importer.Import(D->getClassInterface()));
@@ -3439,8 +3555,11 @@ Decl *ASTNodeImporter::VisitObjCProtocolDecl(ObjCProtocolDecl *D) {
   DeclContext *DC, *LexicalDC;
   DeclarationName Name;
   SourceLocation Loc;
-  if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
+  NamedDecl *ToD;
+  if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
     return nullptr;
+  if (ToD)
+    return ToD;
 
   ObjCProtocolDecl *MergeWithProtocol = nullptr;
   SmallVector<NamedDecl *, 2> FoundDecls;
@@ -3614,8 +3733,11 @@ Decl *ASTNodeImporter::VisitObjCInterfaceDecl(ObjCInterfaceDecl *D) {
   DeclContext *DC, *LexicalDC;
   DeclarationName Name;
   SourceLocation Loc;
-  if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
+  NamedDecl *ToD;
+  if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
     return nullptr;
+  if (ToD)
+    return ToD;
 
   // Look for an existing interface with the same name.
   ObjCInterfaceDecl *MergeWithIface = nullptr;
@@ -3769,8 +3891,11 @@ Decl *ASTNodeImporter::VisitObjCPropertyDecl(ObjCPropertyDecl *D) {
   DeclContext *DC, *LexicalDC;
   DeclarationName Name;
   SourceLocation Loc;
-  if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
+  NamedDecl *ToD;
+  if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
     return nullptr;
+  if (ToD)
+    return ToD;
 
   // Check whether we have already imported this property.
   SmallVector<NamedDecl *, 2> FoundDecls;
@@ -4000,8 +4125,11 @@ Decl *ASTNodeImporter::VisitClassTemplateDecl(ClassTemplateDecl *D) {
   DeclContext *DC, *LexicalDC;
   DeclarationName Name;
   SourceLocation Loc;
-  if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
+  NamedDecl *ToD;
+  if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
     return nullptr;
+  if (ToD)
+    return ToD;
 
   // We may already have a template of the same name; try to find and match it.
   if (!DC->isFunctionOrMethod()) {
@@ -4188,8 +4316,11 @@ Decl *ASTNodeImporter::VisitVarTemplateDecl(VarTemplateDecl *D) {
   DeclContext *DC, *LexicalDC;
   DeclarationName Name;
   SourceLocation Loc;
-  if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
+  NamedDecl *ToD;
+  if (ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
     return nullptr;
+  if (ToD)
+    return ToD;
 
   // We may already have a template of the same name; try to find and match it.
   assert(!DC->isFunctionOrMethod() &&
@@ -4371,10 +4502,457 @@ Decl *ASTNodeImporter::VisitVarTemplateSpecializationDecl(
 // Import Statements
 //----------------------------------------------------------------------------
 
-Stmt *ASTNodeImporter::VisitStmt(Stmt *S) {
-  Importer.FromDiag(S->getLocStart(), diag::err_unsupported_ast_node)
-    << S->getStmtClassName();
-  return nullptr;
+DeclGroupRef ASTNodeImporter::ImportDeclGroup(DeclGroupRef DG) {
+  if (DG.isNull())
+    return DeclGroupRef::Create(Importer.getToContext(), nullptr, 0);
+  size_t NumDecls = DG.end() - DG.begin();
+  SmallVector<Decl *, 1> ToDecls(NumDecls);
+  auto &_Importer = this->Importer;
+  std::transform(DG.begin(), DG.end(), ToDecls.begin(),
+    [&_Importer](Decl *D) -> Decl * {
+      return _Importer.Import(D);
+    });
+  return DeclGroupRef::Create(Importer.getToContext(),
+                              ToDecls.begin(),
+                              NumDecls);
+}
+
+ Stmt *ASTNodeImporter::VisitStmt(Stmt *S) {
+   Importer.FromDiag(S->getLocStart(), diag::err_unsupported_ast_node)
+     << S->getStmtClassName();
+   return nullptr;
+ }
+
+Stmt *ASTNodeImporter::VisitDeclStmt(DeclStmt *S) {
+  DeclGroupRef ToDG = ImportDeclGroup(S->getDeclGroup());
+  for (Decl *ToD : ToDG) {
+    if (!ToD)
+      return nullptr;
+  }
+  SourceLocation ToStartLoc = Importer.Import(S->getStartLoc());
+  SourceLocation ToEndLoc = Importer.Import(S->getEndLoc());
+  return new (Importer.getToContext()) DeclStmt(ToDG, ToStartLoc, ToEndLoc);
+}
+
+Stmt *ASTNodeImporter::VisitNullStmt(NullStmt *S) {
+  SourceLocation ToSemiLoc = Importer.Import(S->getSemiLoc());
+  return new (Importer.getToContext()) NullStmt(ToSemiLoc,
+                                                S->hasLeadingEmptyMacro());
+}
+
+Stmt *ASTNodeImporter::VisitCompoundStmt(CompoundStmt *S) {
+  SmallVector<Stmt *, 4> ToStmts(S->size());
+  auto &_Importer = this->Importer;
+  std::transform(S->body_begin(), S->body_end(), ToStmts.begin(),
+    [&_Importer](Stmt *CS) -> Stmt * {
+      return _Importer.Import(CS);
+    });
+  for (Stmt *ToS : ToStmts) {
+    if (!ToS)
+      return nullptr;
+  }
+  SourceLocation ToLBraceLoc = Importer.Import(S->getLBracLoc());
+  SourceLocation ToRBraceLoc = Importer.Import(S->getRBracLoc());
+  return new (Importer.getToContext()) CompoundStmt(Importer.getToContext(),
+                                                    ToStmts,
+                                                    ToLBraceLoc, ToRBraceLoc);
+}
+
+Stmt *ASTNodeImporter::VisitCaseStmt(CaseStmt *S) {
+  Expr *ToLHS = Importer.Import(S->getLHS());
+  if (!ToLHS)
+    return nullptr;
+  Expr *ToRHS = Importer.Import(S->getRHS());
+  if (!ToRHS && S->getRHS())
+    return nullptr;
+  SourceLocation ToCaseLoc = Importer.Import(S->getCaseLoc());
+  SourceLocation ToEllipsisLoc = Importer.Import(S->getEllipsisLoc());
+  SourceLocation ToColonLoc = Importer.Import(S->getColonLoc());
+  return new (Importer.getToContext()) CaseStmt(ToLHS, ToRHS,
+                                                ToCaseLoc, ToEllipsisLoc,
+                                                ToColonLoc);
+}
+
+Stmt *ASTNodeImporter::VisitDefaultStmt(DefaultStmt *S) {
+  SourceLocation ToDefaultLoc = Importer.Import(S->getDefaultLoc());
+  SourceLocation ToColonLoc = Importer.Import(S->getColonLoc());
+  Stmt *ToSubStmt = Importer.Import(S->getSubStmt());
+  if (!ToSubStmt && S->getSubStmt())
+    return nullptr;
+  return new (Importer.getToContext()) DefaultStmt(ToDefaultLoc, ToColonLoc,
+                                                   ToSubStmt);
+}
+
+Stmt *ASTNodeImporter::VisitLabelStmt(LabelStmt *S) {
+  SourceLocation ToIdentLoc = Importer.Import(S->getIdentLoc());
+  LabelDecl *ToLabelDecl =
+    cast_or_null<LabelDecl>(Importer.Import(S->getDecl()));
+  if (!ToLabelDecl && S->getDecl())
+    return nullptr;
+  Stmt *ToSubStmt = Importer.Import(S->getSubStmt());
+  if (!ToSubStmt && S->getSubStmt())
+    return nullptr;
+  return new (Importer.getToContext()) LabelStmt(ToIdentLoc, ToLabelDecl,
+                                                 ToSubStmt);
+}
+
+Stmt *ASTNodeImporter::VisitAttributedStmt(AttributedStmt *S) {
+  SourceLocation ToAttrLoc = Importer.Import(S->getAttrLoc());
+  ArrayRef<const Attr*> FromAttrs(S->getAttrs());
+  SmallVector<const Attr *, 1> ToAttrs(FromAttrs.size());
+  ASTContext &_ToContext = Importer.getToContext();
+  std::transform(FromAttrs.begin(), FromAttrs.end(), ToAttrs.begin(),
+    [&_ToContext](const Attr *A) -> const Attr * {
+      return A->clone(_ToContext);
+    });
+  for (const Attr *ToA : ToAttrs) {
+    if (!ToA)
+      return nullptr;
+  }
+  Stmt *ToSubStmt = Importer.Import(S->getSubStmt());
+  if (!ToSubStmt && S->getSubStmt())
+    return nullptr;
+  return AttributedStmt::Create(Importer.getToContext(), ToAttrLoc,
+                                ToAttrs, ToSubStmt);
+}
+
+Stmt *ASTNodeImporter::VisitIfStmt(IfStmt *S) {
+  SourceLocation ToIfLoc = Importer.Import(S->getIfLoc());
+  VarDecl *ToConditionVariable = nullptr;
+  if (VarDecl *FromConditionVariable = S->getConditionVariable()) {
+    ToConditionVariable =
+      dyn_cast_or_null<VarDecl>(Importer.Import(FromConditionVariable));
+    if (!ToConditionVariable)
+      return nullptr;
+  }
+  Expr *ToCondition = Importer.Import(S->getCond());
+  if (!ToCondition && S->getCond())
+    return nullptr;
+  Stmt *ToThenStmt = Importer.Import(S->getThen());
+  if (!ToThenStmt && S->getThen())
+    return nullptr;
+  SourceLocation ToElseLoc = Importer.Import(S->getElseLoc());
+  Stmt *ToElseStmt = Importer.Import(S->getElse());
+  if (!ToElseStmt && S->getElse())
+    return nullptr;
+  return new (Importer.getToContext()) IfStmt(Importer.getToContext(),
+                                              ToIfLoc, ToConditionVariable,
+                                              ToCondition, ToThenStmt,
+                                              ToElseLoc, ToElseStmt);
+}
+
+Stmt *ASTNodeImporter::VisitSwitchStmt(SwitchStmt *S) {
+  VarDecl *ToConditionVariable = nullptr;
+  if (VarDecl *FromConditionVariable = S->getConditionVariable()) {
+    ToConditionVariable =
+      dyn_cast_or_null<VarDecl>(Importer.Import(FromConditionVariable));
+    if (!ToConditionVariable)
+      return nullptr;
+  }
+  Expr *ToCondition = Importer.Import(S->getCond());
+  if (!ToCondition && S->getCond())
+    return nullptr;
+  SwitchStmt *ToStmt = new (Importer.getToContext()) SwitchStmt(
+                         Importer.getToContext(), ToConditionVariable,
+                         ToCondition);
+  Stmt *ToBody = Importer.Import(S->getBody());
+  if (!ToBody && S->getBody())
+    return nullptr;
+  ToStmt->setBody(ToBody);
+  ToStmt->setSwitchLoc(Importer.Import(S->getSwitchLoc()));
+  // Now we have to re-chain the cases.
+  SwitchCase *LastChainedSwitchCase = nullptr;
+  for (SwitchCase *SC = S->getSwitchCaseList(); SC != nullptr;
+       SC = SC->getNextSwitchCase()) {
+    SwitchCase *ToSC = dyn_cast_or_null<SwitchCase>(Importer.Import(SC));
+    if (!ToSC)
+      return nullptr;
+    if (LastChainedSwitchCase)
+      LastChainedSwitchCase->setNextSwitchCase(ToSC);
+    else
+      ToStmt->setSwitchCaseList(ToSC);
+    LastChainedSwitchCase = ToSC;
+  }
+  return ToStmt;
+}
+
+Stmt *ASTNodeImporter::VisitWhileStmt(WhileStmt *S) {
+  VarDecl *ToConditionVariable = nullptr;
+  if (VarDecl *FromConditionVariable = S->getConditionVariable()) {
+    ToConditionVariable =
+      dyn_cast_or_null<VarDecl>(Importer.Import(FromConditionVariable));
+    if (!ToConditionVariable)
+      return nullptr;
+  }
+  Expr *ToCondition = Importer.Import(S->getCond());
+  if (!ToCondition && S->getCond())
+    return nullptr;
+  Stmt *ToBody = Importer.Import(S->getBody());
+  if (!ToBody && S->getBody())
+    return nullptr;
+  SourceLocation ToWhileLoc = Importer.Import(S->getWhileLoc());
+  return new (Importer.getToContext()) WhileStmt(Importer.getToContext(),
+                                                 ToConditionVariable,
+                                                 ToCondition, ToBody,
+                                                 ToWhileLoc);
+}
+
+Stmt *ASTNodeImporter::VisitDoStmt(DoStmt *S) {
+  Stmt *ToBody = Importer.Import(S->getBody());
+  if (!ToBody && S->getBody())
+    return nullptr;
+  Expr *ToCondition = Importer.Import(S->getCond());
+  if (!ToCondition && S->getCond())
+    return nullptr;
+  SourceLocation ToDoLoc = Importer.Import(S->getDoLoc());
+  SourceLocation ToWhileLoc = Importer.Import(S->getWhileLoc());
+  SourceLocation ToRParenLoc = Importer.Import(S->getRParenLoc());
+  return new (Importer.getToContext()) DoStmt(ToBody, ToCondition,
+                                              ToDoLoc, ToWhileLoc,
+                                              ToRParenLoc);
+}
+
+Stmt *ASTNodeImporter::VisitForStmt(ForStmt *S) {
+  Stmt *ToInit = Importer.Import(S->getInit());
+  if (!ToInit && S->getInit())
+    return nullptr;
+  Expr *ToCondition = Importer.Import(S->getCond());
+  if (!ToCondition && S->getCond())
+    return nullptr;
+  VarDecl *ToConditionVariable = nullptr;
+  if (VarDecl *FromConditionVariable = S->getConditionVariable()) {
+    ToConditionVariable =
+      dyn_cast_or_null<VarDecl>(Importer.Import(FromConditionVariable));
+    if (!ToConditionVariable)
+      return nullptr;
+  }
+  Expr *ToInc = Importer.Import(S->getInc());
+  if (!ToInc && S->getInc())
+    return nullptr;
+  Stmt *ToBody = Importer.Import(S->getBody());
+  if (!ToBody && S->getBody())
+    return nullptr;
+  SourceLocation ToForLoc = Importer.Import(S->getForLoc());
+  SourceLocation ToLParenLoc = Importer.Import(S->getLParenLoc());
+  SourceLocation ToRParenLoc = Importer.Import(S->getRParenLoc());
+  return new (Importer.getToContext()) ForStmt(Importer.getToContext(),
+                                               ToInit, ToCondition,
+                                               ToConditionVariable,
+                                               ToInc, ToBody,
+                                               ToForLoc, ToLParenLoc,
+                                               ToRParenLoc);
+}
+
+Stmt *ASTNodeImporter::VisitGotoStmt(GotoStmt *S) {
+  LabelDecl *ToLabel = nullptr;
+  if (LabelDecl *FromLabel = S->getLabel()) {
+    ToLabel = dyn_cast_or_null<LabelDecl>(Importer.Import(FromLabel));
+    if (!ToLabel)
+      return nullptr;
+  }
+  SourceLocation ToGotoLoc = Importer.Import(S->getGotoLoc());
+  SourceLocation ToLabelLoc = Importer.Import(S->getLabelLoc());
+  return new (Importer.getToContext()) GotoStmt(ToLabel,
+                                                ToGotoLoc, ToLabelLoc);
+}
+
+Stmt *ASTNodeImporter::VisitIndirectGotoStmt(IndirectGotoStmt *S) {
+  SourceLocation ToGotoLoc = Importer.Import(S->getGotoLoc());
+  SourceLocation ToStarLoc = Importer.Import(S->getStarLoc());
+  Expr *ToTarget = Importer.Import(S->getTarget());
+  if (!ToTarget && S->getTarget())
+    return nullptr;
+  return new (Importer.getToContext()) IndirectGotoStmt(ToGotoLoc, ToStarLoc,
+                                                        ToTarget);
+}
+
+Stmt *ASTNodeImporter::VisitContinueStmt(ContinueStmt *S) {
+  SourceLocation ToContinueLoc = Importer.Import(S->getContinueLoc());
+  return new (Importer.getToContext()) ContinueStmt(ToContinueLoc);
+}
+
+Stmt *ASTNodeImporter::VisitBreakStmt(BreakStmt *S) {
+  SourceLocation ToBreakLoc = Importer.Import(S->getBreakLoc());
+  return new (Importer.getToContext()) BreakStmt(ToBreakLoc);
+}
+
+Stmt *ASTNodeImporter::VisitReturnStmt(ReturnStmt *S) {
+  SourceLocation ToRetLoc = Importer.Import(S->getReturnLoc());
+  Expr *ToRetExpr = Importer.Import(S->getRetValue());
+  if (!ToRetExpr && S->getRetValue())
+    return nullptr;
+  VarDecl *NRVOCandidate = const_cast<VarDecl*>(S->getNRVOCandidate());
+  VarDecl *ToNRVOCandidate = cast_or_null<VarDecl>(Importer.Import(NRVOCandidate));
+  if (!ToNRVOCandidate && NRVOCandidate)
+    return nullptr;
+  return new (Importer.getToContext()) ReturnStmt(ToRetLoc, ToRetExpr,
+                                                  ToNRVOCandidate);
+}
+
+Stmt *ASTNodeImporter::VisitCXXCatchStmt(CXXCatchStmt *S) {
+  SourceLocation ToCatchLoc = Importer.Import(S->getCatchLoc());
+  VarDecl *ToExceptionDecl = nullptr;
+  if (VarDecl *FromExceptionDecl = S->getExceptionDecl()) {
+    ToExceptionDecl =
+      dyn_cast_or_null<VarDecl>(Importer.Import(FromExceptionDecl));
+    if (!ToExceptionDecl)
+      return nullptr;
+  }
+  Stmt *ToHandlerBlock = Importer.Import(S->getHandlerBlock());
+  if (!ToHandlerBlock && S->getHandlerBlock())
+    return nullptr;
+  return new (Importer.getToContext()) CXXCatchStmt(ToCatchLoc,
+                                                    ToExceptionDecl,
+                                                    ToHandlerBlock);
+}
+
+Stmt *ASTNodeImporter::VisitCXXTryStmt(CXXTryStmt *S) {
+  SourceLocation ToTryLoc = Importer.Import(S->getTryLoc());
+  Stmt *ToTryBlock = Importer.Import(S->getTryBlock());
+  if (!ToTryBlock && S->getTryBlock())
+    return nullptr;
+  SmallVector<Stmt *, 1> ToHandlers(S->getNumHandlers());
+  for (unsigned HI = 0, HE = S->getNumHandlers(); HI != HE; ++HI) {
+    CXXCatchStmt *FromHandler = S->getHandler(HI);
+    if (Stmt *ToHandler = Importer.Import(FromHandler))
+      ToHandlers[HI] = ToHandler;
+    else
+      return nullptr;
+  }
+  return CXXTryStmt::Create(Importer.getToContext(), ToTryLoc, ToTryBlock,
+                            ToHandlers);
+}
+
+Stmt *ASTNodeImporter::VisitCXXForRangeStmt(CXXForRangeStmt *S) {
+  DeclStmt *ToRange =
+    dyn_cast_or_null<DeclStmt>(Importer.Import(S->getRangeStmt()));
+  if (!ToRange && S->getRangeStmt())
+    return nullptr;
+  DeclStmt *ToBeginEnd =
+    dyn_cast_or_null<DeclStmt>(Importer.Import(S->getBeginEndStmt()));
+  if (!ToBeginEnd && S->getBeginEndStmt())
+    return nullptr;
+  Expr *ToCond = Importer.Import(S->getCond());
+  if (!ToCond && S->getCond())
+    return nullptr;
+  Expr *ToInc = Importer.Import(S->getInc());
+  if (!ToInc && S->getInc())
+    return nullptr;
+  DeclStmt *ToLoopVar =
+    dyn_cast_or_null<DeclStmt>(Importer.Import(S->getLoopVarStmt()));
+  if (!ToLoopVar && S->getLoopVarStmt())
+    return nullptr;
+  Stmt *ToBody = Importer.Import(S->getBody());
+  if (!ToBody && S->getBody())
+    return nullptr;
+  SourceLocation ToForLoc = Importer.Import(S->getForLoc());
+  SourceLocation ToColonLoc = Importer.Import(S->getColonLoc());
+  SourceLocation ToRParenLoc = Importer.Import(S->getRParenLoc());
+  return new (Importer.getToContext()) CXXForRangeStmt(ToRange, ToBeginEnd,
+                                                       ToCond, ToInc,
+                                                       ToLoopVar, ToBody,
+                                                       ToForLoc, ToColonLoc,
+                                                       ToRParenLoc);
+}
+
+Stmt *ASTNodeImporter::VisitObjCForCollectionStmt(ObjCForCollectionStmt *S) {
+  Stmt *ToElem = Importer.Import(S->getElement());
+  if (!ToElem && S->getElement())
+    return nullptr;
+  Expr *ToCollect = Importer.Import(S->getCollection());
+  if (!ToCollect && S->getCollection())
+    return nullptr;
+  Stmt *ToBody = Importer.Import(S->getBody());
+  if (!ToBody && S->getBody())
+    return nullptr;
+  SourceLocation ToForLoc = Importer.Import(S->getForLoc());
+  SourceLocation ToRParenLoc = Importer.Import(S->getRParenLoc());
+  return new (Importer.getToContext()) ObjCForCollectionStmt(ToElem,
+                                                             ToCollect,
+                                                             ToBody, ToForLoc,
+                                                             ToRParenLoc);
+}
+
+Stmt *ASTNodeImporter::VisitObjCAtCatchStmt(ObjCAtCatchStmt *S) {
+  SourceLocation ToAtCatchLoc = Importer.Import(S->getAtCatchLoc());
+  SourceLocation ToRParenLoc = Importer.Import(S->getRParenLoc());
+  VarDecl *ToExceptionDecl = nullptr;
+  if (VarDecl *FromExceptionDecl = S->getCatchParamDecl()) {
+    ToExceptionDecl =
+      dyn_cast_or_null<VarDecl>(Importer.Import(FromExceptionDecl));
+    if (!ToExceptionDecl)
+      return nullptr;
+  }
+  Stmt *ToBody = Importer.Import(S->getCatchBody());
+  if (!ToBody && S->getCatchBody())
+    return nullptr;
+  return new (Importer.getToContext()) ObjCAtCatchStmt(ToAtCatchLoc,
+                                                       ToRParenLoc,
+                                                       ToExceptionDecl,
+                                                       ToBody);
+}
+
+Stmt *ASTNodeImporter::VisitObjCAtFinallyStmt(ObjCAtFinallyStmt *S) {
+  SourceLocation ToAtFinallyLoc = Importer.Import(S->getAtFinallyLoc());
+  Stmt *ToAtFinallyStmt = Importer.Import(S->getFinallyBody());
+  if (!ToAtFinallyStmt && S->getFinallyBody())
+    return nullptr;
+  return new (Importer.getToContext()) ObjCAtFinallyStmt(ToAtFinallyLoc,
+                                                         ToAtFinallyStmt);
+}
+
+Stmt *ASTNodeImporter::VisitObjCAtTryStmt(ObjCAtTryStmt *S) {
+  SourceLocation ToAtTryLoc = Importer.Import(S->getAtTryLoc());
+  Stmt *ToAtTryStmt = Importer.Import(S->getTryBody());
+  if (!ToAtTryStmt && S->getTryBody())
+    return nullptr;
+  SmallVector<Stmt *, 1> ToCatchStmts(S->getNumCatchStmts());
+  for (unsigned CI = 0, CE = S->getNumCatchStmts(); CI != CE; ++CI) {
+    ObjCAtCatchStmt *FromCatchStmt = S->getCatchStmt(CI);
+    if (Stmt *ToCatchStmt = Importer.Import(FromCatchStmt))
+      ToCatchStmts[CI] = ToCatchStmt;
+    else
+      return nullptr;
+  }
+  Stmt *ToAtFinallyStmt = Importer.Import(S->getFinallyStmt());
+  if (!ToAtFinallyStmt && S->getFinallyStmt())
+    return nullptr;
+  return ObjCAtTryStmt::Create(Importer.getToContext(),
+                               ToAtTryLoc, ToAtTryStmt,
+                               ToCatchStmts.begin(), ToCatchStmts.size(),
+                               ToAtFinallyStmt);
+}
+
+Stmt *ASTNodeImporter::VisitObjCAtSynchronizedStmt
+  (ObjCAtSynchronizedStmt *S) {
+  SourceLocation ToAtSynchronizedLoc =
+    Importer.Import(S->getAtSynchronizedLoc());
+  Expr *ToSynchExpr = Importer.Import(S->getSynchExpr());
+  if (!ToSynchExpr && S->getSynchExpr())
+    return nullptr;
+  Stmt *ToSynchBody = Importer.Import(S->getSynchBody());
+  if (!ToSynchBody && S->getSynchBody())
+    return nullptr;
+  return new (Importer.getToContext()) ObjCAtSynchronizedStmt(
+    ToAtSynchronizedLoc, ToSynchExpr, ToSynchBody);
+}
+
+Stmt *ASTNodeImporter::VisitObjCAtThrowStmt(ObjCAtThrowStmt *S) {
+  SourceLocation ToAtThrowLoc = Importer.Import(S->getThrowLoc());
+  Expr *ToThrow = Importer.Import(S->getThrowExpr());
+  if (!ToThrow && S->getThrowExpr())
+    return nullptr;
+  return new (Importer.getToContext()) ObjCAtThrowStmt(ToAtThrowLoc, ToThrow);
+}
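
All of the statement importers above follow a single convention: Importer.Import() maps a null child to a null result, so a null return is fatal only when the source child was actually present. A minimal sketch of the idiom; the helper name is hypothetical and not part of the ASTImporter API:

    // Hypothetical condensation of the pattern repeated in each importer:
    // a null import result is an error only if the source child was non-null.
    template <typename Node>
    static bool importFailed(const Node *From, const Node *To) {
      return !To && From;
    }

    // Usage, e.g. inside a VisitDoStmt-like importer:
    //   Stmt *ToBody = Importer.Import(S->getBody());
    //   if (importFailed(S->getBody(), ToBody))
    //     return nullptr;
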
+
+Stmt *ASTNodeImporter::VisitObjCAutoreleasePoolStmt
+  (ObjCAutoreleasePoolStmt *S) {
+  SourceLocation ToAtLoc = Importer.Import(S->getAtLoc());
+  Stmt *ToSubStmt = Importer.Import(S->getSubStmt());
+  if (!ToSubStmt && S->getSubStmt())
+    return nullptr;
+  return new (Importer.getToContext()) ObjCAutoreleasePoolStmt(ToAtLoc,
+                                                               ToSubStmt);
 }
 
 //----------------------------------------------------------------------------
@@ -4585,6 +5163,107 @@ Expr *ASTNodeImporter::VisitCStyleCastExpr(CStyleCastExpr *E) {
                                         Importer.Import(E->getRParenLoc()));
 }
 
+Expr *ASTNodeImporter::VisitCXXConstructExpr(CXXConstructExpr *E) {
+  QualType T = Importer.Import(E->getType());
+  if (T.isNull())
+    return nullptr;
+
+  CXXConstructorDecl *ToCCD =
+    dyn_cast<CXXConstructorDecl>(Importer.Import(E->getConstructor()));
+  if (!ToCCD && E->getConstructor())
+    return nullptr;
+
+  size_t NumArgs = E->getNumArgs();
+  SmallVector<Expr *, 1> ToArgs(NumArgs);
+  ASTImporter &_Importer = Importer;
+  std::transform(E->arg_begin(), E->arg_end(), ToArgs.begin(),
+    [&_Importer](Expr *AE) -> Expr * {
+      return _Importer.Import(AE);
+    });
+  for (Expr *ToA : ToArgs) {
+    if (!ToA)
+      return nullptr;
+  }
+
+  return CXXConstructExpr::Create(Importer.getToContext(), T,
+                                  Importer.Import(E->getLocation()),
+                                  ToCCD, E->isElidable(),
+                                  ToArgs, E->hadMultipleCandidates(),
+                                  E->isListInitialization(),
+                                  E->isStdInitListInitialization(),
+                                  E->requiresZeroInitialization(),
+                                  E->getConstructionKind(),
+                                  Importer.Import(E->getParenOrBraceRange()));
+}
+
+Expr *ASTNodeImporter::VisitMemberExpr(MemberExpr *E) {
+  QualType T = Importer.Import(E->getType());
+  if (T.isNull())
+    return nullptr;
+
+  Expr *ToBase = Importer.Import(E->getBase());
+  if (!ToBase && E->getBase())
+    return nullptr;
+
+  ValueDecl *ToMember = dyn_cast<ValueDecl>(Importer.Import(E->getMemberDecl()));
+  if (!ToMember && E->getMemberDecl())
+    return nullptr;
+
+  DeclAccessPair ToFoundDecl = DeclAccessPair::make(
+    dyn_cast<NamedDecl>(Importer.Import(E->getFoundDecl().getDecl())),
+    E->getFoundDecl().getAccess());
+
+  DeclarationNameInfo ToMemberNameInfo(
+    Importer.Import(E->getMemberNameInfo().getName()),
+    Importer.Import(E->getMemberNameInfo().getLoc()));
+
+  if (E->hasExplicitTemplateArgs()) {
+    return nullptr; // FIXME: handle template arguments
+  }
+
+  return MemberExpr::Create(Importer.getToContext(), ToBase,
+                            E->isArrow(),
+                            Importer.Import(E->getOperatorLoc()),
+                            Importer.Import(E->getQualifierLoc()),
+                            Importer.Import(E->getTemplateKeywordLoc()),
+                            ToMember, ToFoundDecl, ToMemberNameInfo,
+                            nullptr, T, E->getValueKind(),
+                            E->getObjectKind());
+}
+
+Expr *ASTNodeImporter::VisitCallExpr(CallExpr *E) {
+  QualType T = Importer.Import(E->getType());
+  if (T.isNull())
+    return nullptr;
+
+  Expr *ToCallee = Importer.Import(E->getCallee());
+  if (!ToCallee && E->getCallee())
+    return nullptr;
+
+  unsigned NumArgs = E->getNumArgs();
+
+  llvm::SmallVector<Expr *, 2> ToArgs(NumArgs);
+
+  for (unsigned ai = 0, ae = NumArgs; ai != ae; ++ai) {
+    Expr *FromArg = E->getArg(ai);
+    Expr *ToArg = Importer.Import(FromArg);
+    if (!ToArg)
+      return nullptr;
+    ToArgs[ai] = ToArg;
+  }
+
+  Expr **ToArgs_Copied = new (Importer.getToContext())
+    Expr*[NumArgs];
+
+  for (unsigned ai = 0, ae = NumArgs; ai != ae; ++ai)
+    ToArgs_Copied[ai] = ToArgs[ai];
+
+  return new (Importer.getToContext())
+    CallExpr(Importer.getToContext(), ToCallee,
+             ArrayRef<Expr*>(ToArgs_Copied, NumArgs), T, E->getValueKind(),
+             Importer.Import(E->getRParenLoc()));
+}
+
 ASTImporter::ASTImporter(ASTContext &ToContext, FileManager &ToFileManager,
                          ASTContext &FromContext, FileManager &FromFileManager,
                          bool MinimalImport)
@@ -4636,6 +5315,17 @@ TypeSourceInfo *ASTImporter::Import(TypeSourceInfo *FromTSI) {
                        FromTSI->getTypeLoc().getLocStart());
 }
 
+Decl *ASTImporter::GetAlreadyImportedOrNull(Decl *FromD) {
+  llvm::DenseMap<Decl *, Decl *>::iterator Pos = ImportedDecls.find(FromD);
+  if (Pos != ImportedDecls.end()) {
+    Decl *ToD = Pos->second;
+    ASTNodeImporter(*this).ImportDefinitionIfNeeded(FromD, ToD);
+    return ToD;
+  } else {
+    return nullptr;
+  }
+}
+
 Decl *ASTImporter::Import(Decl *FromD) {
   if (!FromD)
     return nullptr;
@@ -4927,8 +5617,9 @@ SourceLocation ASTImporter::Import(SourceLocation FromLoc) {
   FileID ToFileID = Import(Decomposed.first);
   if (ToFileID.isInvalid())
     return SourceLocation();
-  return ToSM.getLocForStartOfFile(ToFileID)
-             .getLocWithOffset(Decomposed.second);
+  SourceLocation ret = ToSM.getLocForStartOfFile(ToFileID)
+                           .getLocWithOffset(Decomposed.second);
+  return ret;
 }
 
 SourceRange ASTImporter::Import(SourceRange FromRange) {
@@ -4952,7 +5643,7 @@ FileID ASTImporter::Import(FileID FromID) {
   // Map the FileID to the "to" source manager.
   FileID ToID;
   const SrcMgr::ContentCache *Cache = FromSLoc.getFile().getContentCache();
-  if (Cache->OrigEntry) {
+  if (Cache->OrigEntry && Cache->OrigEntry->getDir()) {
     // FIXME: We probably want to use getVirtualFile(), so we don't hit the
     // disk again
     // FIXME: We definitely want to re-use the existing MemoryBuffer, rather
diff --git a/contrib/llvm/tools/clang/lib/AST/AttrImpl.cpp b/contrib/llvm/tools/clang/lib/AST/AttrImpl.cpp
index 0bf6bcd..cb60870 100644
--- a/contrib/llvm/tools/clang/lib/AST/AttrImpl.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/AttrImpl.cpp
@@ -7,7 +7,7 @@
 //
 //===----------------------------------------------------------------------===//
 //
-// This file contains out-of-line virtual methods for Attr classes.
+// This file contains out-of-line methods for Attr classes.
 //
 //===----------------------------------------------------------------------===//
 
@@ -18,10 +18,4 @@
 #include "llvm/ADT/StringSwitch.h"
 using namespace clang;
 
-Attr::~Attr() { }
-
-void InheritableAttr::anchor() { }
-
-void InheritableParamAttr::anchor() { }
-
 #include "clang/AST/AttrImpl.inc"
diff --git a/contrib/llvm/tools/clang/lib/AST/CXXABI.h b/contrib/llvm/tools/clang/lib/AST/CXXABI.h
index 8e9e358..dad2264 100644
--- a/contrib/llvm/tools/clang/lib/AST/CXXABI.h
+++ b/contrib/llvm/tools/clang/lib/AST/CXXABI.h
@@ -20,6 +20,8 @@
 namespace clang {
 
 class ASTContext;
+class CXXConstructorDecl;
+class Expr;
 class MemberPointerType;
 class MangleNumberingContext;
 
@@ -41,6 +43,20 @@ public:
 
   /// Returns a new mangling number context for this C++ ABI.
   virtual MangleNumberingContext *createMangleNumberingContext() const = 0;
+
+  /// Adds a mapping from class to copy constructor for this C++ ABI.
+  virtual void addCopyConstructorForExceptionObject(CXXRecordDecl *,
+                                                    CXXConstructorDecl *) = 0;
+
+  /// Retrieves the mapping from class to copy constructor for this C++ ABI.
+  virtual const CXXConstructorDecl *
+  getCopyConstructorForExceptionObject(CXXRecordDecl *) = 0;
+
+  virtual void addDefaultArgExprForConstructor(const CXXConstructorDecl *CD,
+                                               unsigned ParmIdx, Expr *DAE) = 0;
+
+  virtual Expr *getDefaultArgExprForConstructor(const CXXConstructorDecl *CD,
+                                                unsigned ParmIdx) = 0;
 };
 
 /// Creates an instance of a C++ ABI class.
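
The four pure-virtual hooks added to CXXABI above only record and retrieve per-declaration facts, so a concrete ABI object can back them with two maps. A minimal sketch under that assumption; the class and member names are hypothetical, and the real MicrosoftCXXABI may lay this out differently:

    // Hypothetical backing store for the new CXXABI hooks.
    // (The other CXXABI pure virtuals are omitted, so this stays a sketch.)
    class SketchCXXABI : public CXXABI {
      llvm::DenseMap<CXXRecordDecl *, CXXConstructorDecl *> RecordToCopyCtor;
      llvm::DenseMap<std::pair<const CXXConstructorDecl *, unsigned>, Expr *>
          CtorToDefaultArgExpr;

    public:
      void addCopyConstructorForExceptionObject(CXXRecordDecl *RD,
                                                CXXConstructorDecl *CD) override {
        RecordToCopyCtor[RD] = CD;
      }
      const CXXConstructorDecl *
      getCopyConstructorForExceptionObject(CXXRecordDecl *RD) override {
        return RecordToCopyCtor.lookup(RD);  // null if never recorded
      }
      void addDefaultArgExprForConstructor(const CXXConstructorDecl *CD,
                                           unsigned ParmIdx, Expr *DAE) override {
        CtorToDefaultArgExpr[std::make_pair(CD, ParmIdx)] = DAE;
      }
      Expr *getDefaultArgExprForConstructor(const CXXConstructorDecl *CD,
                                            unsigned ParmIdx) override {
        return CtorToDefaultArgExpr.lookup(std::make_pair(CD, ParmIdx));
      }
    };
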
diff --git a/contrib/llvm/tools/clang/lib/AST/CXXInheritance.cpp b/contrib/llvm/tools/clang/lib/AST/CXXInheritance.cpp
index 6e80ee7..800c8f8 100644
--- a/contrib/llvm/tools/clang/lib/AST/CXXInheritance.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/CXXInheritance.cpp
@@ -318,48 +318,36 @@ bool CXXRecordDecl::lookupInBases(BaseMatchesCallback *BaseMatches,
   //
   // FIXME: This is an O(N^2) algorithm, but DPG doesn't see an easy
   // way to make it any faster.
-  for (CXXBasePaths::paths_iterator P = Paths.begin(), PEnd = Paths.end();
-       P != PEnd; /* increment in loop */) {
-    bool Hidden = false;
-
-    for (CXXBasePath::iterator PE = P->begin(), PEEnd = P->end();
-         PE != PEEnd && !Hidden; ++PE) {
-      if (PE->Base->isVirtual()) {
-        CXXRecordDecl *VBase = nullptr;
-        if (const RecordType *Record = PE->Base->getType()->getAs<RecordType>())
-          VBase = cast<CXXRecordDecl>(Record->getDecl());
-        if (!VBase)
+  Paths.Paths.remove_if([&Paths](const CXXBasePath &Path) {
+    for (const CXXBasePathElement &PE : Path) {
+      if (!PE.Base->isVirtual())
+        continue;
+
+      CXXRecordDecl *VBase = nullptr;
+      if (const RecordType *Record = PE.Base->getType()->getAs<RecordType>())
+        VBase = cast<CXXRecordDecl>(Record->getDecl());
+      if (!VBase)
+        break;
+
+      // The declaration(s) we found along this path were found in a
+      // subobject of a virtual base. Check whether this virtual
+      // base is a subobject of any other path; if so, then the
+      // declarations in this path are hidden by that path.
+      for (const CXXBasePath &HidingP : Paths) {
+        CXXRecordDecl *HidingClass = nullptr;
+        if (const RecordType *Record =
+                HidingP.back().Base->getType()->getAs<RecordType>())
+          HidingClass = cast<CXXRecordDecl>(Record->getDecl());
+        if (!HidingClass)
           break;
-
-        // The declaration(s) we found along this path were found in a
-        // subobject of a virtual base. Check whether this virtual
-        // base is a subobject of any other path; if so, then the
-        // declaration in this path are hidden by that patch.
-        for (CXXBasePaths::paths_iterator HidingP = Paths.begin(),
-                                       HidingPEnd = Paths.end();
-             HidingP != HidingPEnd;
-             ++HidingP) {
-          CXXRecordDecl *HidingClass = nullptr;
-          if (const RecordType *Record
-                = HidingP->back().Base->getType()->getAs<RecordType>())
-            HidingClass = cast<CXXRecordDecl>(Record->getDecl());
-          if (!HidingClass)
-            break;
-
-          if (HidingClass->isVirtuallyDerivedFrom(VBase)) {
-            Hidden = true;
-            break;
-          }
-        }
+        if (HidingClass->isVirtuallyDerivedFrom(VBase))
+          return true;
       }
     }
+    return false;
+  });
-
-    if (Hidden)
-      P = Paths.Paths.erase(P);
-    else
-      ++P;
-  }
-
   return true;
 }
@@ -569,18 +557,14 @@ void FinalOverriderCollector::Collect(const CXXRecordDecl *RD,
   // overrider. To do so, we dig down to the original virtual
   // functions using data recursion and update all of the methods it
   // overrides.
-  typedef std::pair<CXXMethodDecl::method_iterator,
-                    CXXMethodDecl::method_iterator> OverriddenMethods;
+  typedef llvm::iterator_range<CXXMethodDecl::method_iterator>
+      OverriddenMethods;
   SmallVector<OverriddenMethods, 4> Stack;
-  Stack.push_back(std::make_pair(CanonM->begin_overridden_methods(),
-                                 CanonM->end_overridden_methods()));
+  Stack.push_back(llvm::make_range(CanonM->begin_overridden_methods(),
+                                   CanonM->end_overridden_methods()));
   while (!Stack.empty()) {
-    OverriddenMethods OverMethods = Stack.back();
-    Stack.pop_back();
-
-    for (; OverMethods.first != OverMethods.second; ++OverMethods.first) {
-      const CXXMethodDecl *CanonOM
-        = cast<CXXMethodDecl>((*OverMethods.first)->getCanonicalDecl());
+    for (const CXXMethodDecl *OM : Stack.pop_back_val()) {
+      const CXXMethodDecl *CanonOM = OM->getCanonicalDecl();
 
       // C++ [class.virtual]p2:
       //   A virtual member function C::vf of a class object S is
@@ -601,8 +585,8 @@ void FinalOverriderCollector::Collect(const CXXRecordDecl *RD,
 
       // Continue recursion to the methods that this virtual method
       // overrides.
-      Stack.push_back(std::make_pair(CanonOM->begin_overridden_methods(),
-                                     CanonOM->end_overridden_methods()));
+      Stack.push_back(llvm::make_range(CanonOM->begin_overridden_methods(),
+                                       CanonOM->end_overridden_methods()));
     }
   }
 
@@ -630,54 +614,32 @@ CXXRecordDecl::getFinalOverriders(CXXFinalOverriderMap &FinalOverriders) const {
   // Weed out any final overriders that come from virtual base class
   // subobjects that were hidden by other subobjects along any path.
   // This is the final-overrider variant of C++ [class.member.lookup]p10.
-  for (CXXFinalOverriderMap::iterator OM = FinalOverriders.begin(),
-                           OMEnd = FinalOverriders.end();
-       OM != OMEnd;
-       ++OM) {
-    for (OverridingMethods::iterator SO = OM->second.begin(),
-                                  SOEnd = OM->second.end();
-         SO != SOEnd;
-         ++SO) {
-      SmallVectorImpl<UniqueVirtualMethod> &Overriding = SO->second;
+  for (auto &OM : FinalOverriders) {
+    for (auto &SO : OM.second) {
+      SmallVectorImpl<UniqueVirtualMethod> &Overriding = SO.second;
       if (Overriding.size() < 2)
         continue;
 
-      for (SmallVectorImpl<UniqueVirtualMethod>::iterator
-             Pos = Overriding.begin(), PosEnd = Overriding.end();
-           Pos != PosEnd;
-           /* increment in loop */) {
-        if (!Pos->InVirtualSubobject) {
-          ++Pos;
-          continue;
-        }
+      auto IsHidden = [&Overriding](const UniqueVirtualMethod &M) {
+        if (!M.InVirtualSubobject)
+          return false;
 
         // We have an overriding method in a virtual base class
         // subobject (or non-virtual base class subobject thereof);
         // determine whether there exists another overriding method
        // in a base class subobject that hides the virtual base class
        // subobject.
-        bool Hidden = false;
-        for (SmallVectorImpl<UniqueVirtualMethod>::iterator
-               OP = Overriding.begin(), OPEnd = Overriding.end();
-             OP != OPEnd && !Hidden;
-             ++OP) {
-          if (Pos == OP)
-            continue;
-
-          if (OP->Method->getParent()->isVirtuallyDerivedFrom(
-                const_cast<CXXRecordDecl *>(Pos->InVirtualSubobject)))
-            Hidden = true;
-        }
-
-        if (Hidden) {
-          // The current overriding function is hidden by another
-          // overriding function; remove this one.
-          Pos = Overriding.erase(Pos);
-          PosEnd = Overriding.end();
-        } else {
-          ++Pos;
-        }
-      }
+        for (const UniqueVirtualMethod &OP : Overriding)
+          if (&M != &OP &&
+              OP.Method->getParent()->isVirtuallyDerivedFrom(
+                  M.InVirtualSubobject))
+            return true;
+        return false;
+      };
+
+      Overriding.erase(
+          std::remove_if(Overriding.begin(), Overriding.end(), IsHidden),
+          Overriding.end());
     }
   }
 }
diff --git a/contrib/llvm/tools/clang/lib/AST/CommentLexer.cpp b/contrib/llvm/tools/clang/lib/AST/CommentLexer.cpp
index 06a08bd..98b7e36 100644
--- a/contrib/llvm/tools/clang/lib/AST/CommentLexer.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/CommentLexer.cpp
@@ -514,6 +514,12 @@ void Lexer::lexVerbatimBlockBody(Token &T) {
   if (CommentState == LCS_InsideCComment)
     skipLineStartingDecorations();
 
+  if (BufferPtr == CommentEnd) {
+    formTokenWithChars(T, BufferPtr, tok::verbatim_block_line);
+    T.setVerbatimBlockText("");
+    return;
+  }
+
   lexVerbatimBlockFirstLine(T);
 }
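
The two CXXInheritance.cpp loops rewritten above trade manual erase-while-iterating for predicate-based removal: CXXBasePaths keeps its paths in a std::list, so lookupInBases can call the remove_if member directly, while getFinalOverriders applies the erase/remove_if idiom to a SmallVector. The same two shapes in a self-contained form:

    #include <algorithm>
    #include <list>
    #include <vector>

    static void eraseOdds(std::list<int> &L, std::vector<int> &V) {
      auto IsOdd = [](int N) { return N % 2 != 0; };
      // Linked lists unlink nodes in place; no iterator bookkeeping needed.
      L.remove_if(IsOdd);
      // Contiguous containers compact the survivors first, then chop the tail.
      V.erase(std::remove_if(V.begin(), V.end(), IsOdd), V.end());
    }
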
diff --git a/contrib/llvm/tools/clang/lib/AST/Decl.cpp b/contrib/llvm/tools/clang/lib/AST/Decl.cpp
index dc08d23..8eff4c4 100644
--- a/contrib/llvm/tools/clang/lib/AST/Decl.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/Decl.cpp
@@ -44,6 +44,12 @@ bool Decl::isOutOfLine() const {
   return !getLexicalDeclContext()->Equals(getDeclContext());
 }
 
+TranslationUnitDecl::TranslationUnitDecl(ASTContext &ctx)
+    : Decl(TranslationUnit, nullptr, SourceLocation()),
+      DeclContext(TranslationUnit), Ctx(ctx), AnonymousNamespace(nullptr) {
+  Hidden = Ctx.getLangOpts().ModulesLocalVisibility;
+}
+
 //===----------------------------------------------------------------------===//
 // NamedDecl Implementation
 //===----------------------------------------------------------------------===//
@@ -894,13 +900,13 @@ static LinkageInfo getLVForClassMember(const NamedDecl *D,
   if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D)) {
     // If the type of the function uses a type with unique-external
     // linkage, it's not legally usable from outside this translation unit.
-    // But only look at the type-as-written. If this function has an auto-deduced
-    // return type, we can't compute the linkage of that type because it could
-    // require looking at the linkage of this function, and we don't need this
-    // for correctness because the type is not part of the function's
-    // signature.
-    // FIXME: This is a hack. We should be able to solve this circularity and the
-    // one in getLVForNamespaceScopeDecl for Functions some other way.
+    // But only look at the type-as-written. If this function has an
+    // auto-deduced return type, we can't compute the linkage of that type
+    // because it could require looking at the linkage of this function, and we
+    // don't need this for correctness because the type is not part of the
+    // function's signature.
+    // FIXME: This is a hack. We should be able to solve this circularity and
+    // the one in getLVForNamespaceScopeDecl for Functions some other way.
     {
       QualType TypeAsWritten = MD->getType();
       if (TypeSourceInfo *TSI = MD->getTypeSourceInfo())
@@ -1445,74 +1451,127 @@ void NamedDecl::getNameForDiagnostic(raw_ostream &OS,
   printName(OS);
 }
 
-bool NamedDecl::declarationReplaces(NamedDecl *OldD) const {
-  assert(getDeclName() == OldD->getDeclName() && "Declaration name mismatch");
+static bool isKindReplaceableBy(Decl::Kind OldK, Decl::Kind NewK) {
+  // For method declarations, we never replace.
+  if (ObjCMethodDecl::classofKind(NewK))
+    return false;
 
-  // UsingDirectiveDecl's are not really NamedDecl's, and all have same name.
-  // We want to keep it, unless it nominates same namespace.
-  if (getKind() == Decl::UsingDirective) {
-    return cast<UsingDirectiveDecl>(this)->getNominatedNamespace()
-             ->getOriginalNamespace() ==
-           cast<UsingDirectiveDecl>(OldD)->getNominatedNamespace()
-             ->getOriginalNamespace();
+  if (OldK == NewK)
+    return true;
+
+  // A compatibility alias for a class can be replaced by an interface.
+  if (ObjCCompatibleAliasDecl::classofKind(OldK) &&
+      ObjCInterfaceDecl::classofKind(NewK))
+    return true;
+
+  // A typedef-declaration, alias-declaration, or Objective-C class declaration
+  // can replace another declaration of the same type. Semantic analysis checks
+  // that we have matching types.
+  if ((TypedefNameDecl::classofKind(OldK) ||
+       ObjCInterfaceDecl::classofKind(OldK)) &&
+      (TypedefNameDecl::classofKind(NewK) ||
+       ObjCInterfaceDecl::classofKind(NewK)))
+    return true;
+
+  // Otherwise, a kind mismatch implies that the declaration is not replaced.
+  return false;
+}
+
+template<typename T> static bool isRedeclarableImpl(Redeclarable<T> *) {
+  return true;
+}
+static bool isRedeclarableImpl(...) { return false; }
+static bool isRedeclarable(Decl::Kind K) {
+  switch (K) {
+#define DECL(Type, Base) \
+  case Decl::Type: \
+    return isRedeclarableImpl((Type##Decl *)nullptr);
+#define ABSTRACT_DECL(DECL)
+#include "clang/AST/DeclNodes.inc"
   }
+  llvm_unreachable("unknown decl kind");
+}
 
-  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(this))
-    // For function declarations, we keep track of redeclarations.
-    return FD->getPreviousDecl() == OldD;
+bool NamedDecl::declarationReplaces(NamedDecl *OldD, bool IsKnownNewer) const {
+  assert(getDeclName() == OldD->getDeclName() && "Declaration name mismatch");
 
-  // For function templates, the underlying function declarations are linked.
-  if (const FunctionTemplateDecl *FunctionTemplate
-        = dyn_cast<FunctionTemplateDecl>(this))
-    if (const FunctionTemplateDecl *OldFunctionTemplate
-          = dyn_cast<FunctionTemplateDecl>(OldD))
-      return FunctionTemplate->getTemplatedDecl()
-               ->declarationReplaces(OldFunctionTemplate->getTemplatedDecl());
-
-  // For method declarations, we keep track of redeclarations.
-  if (isa<ObjCMethodDecl>(this))
+  // Never replace one imported declaration with another; we need both results
+  // when re-exporting.
+  if (OldD->isFromASTFile() && isFromASTFile())
     return false;
 
-  // FIXME: Is this correct if one of the decls comes from an inline namespace?
-  if (isa<ObjCInterfaceDecl>(this) && isa<ObjCCompatibleAliasDecl>(OldD))
-    return true;
+  if (!isKindReplaceableBy(OldD->getKind(), getKind()))
+    return false;
 
-  if (isa<UsingShadowDecl>(this) && isa<UsingShadowDecl>(OldD))
-    return cast<UsingShadowDecl>(this)->getTargetDecl() ==
-           cast<UsingShadowDecl>(OldD)->getTargetDecl();
+  // Inline namespaces can give us two declarations with the same
+  // name and kind in the same scope but different contexts; we should
+  // keep both declarations in this case.
+  if (!this->getDeclContext()->getRedeclContext()->Equals(
+          OldD->getDeclContext()->getRedeclContext()))
+    return false;
 
-  if (isa<UsingDecl>(this) && isa<UsingDecl>(OldD)) {
+  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(this))
+    // For function declarations, we keep track of redeclarations.
+    // FIXME: This returns false for functions that should in fact be replaced.
+    // Instead, perform some kind of type check?
+    if (FD->getPreviousDecl() != OldD)
+      return false;
+
+  // For function templates, the underlying function declarations are linked.
+  if (const FunctionTemplateDecl *FunctionTemplate =
+          dyn_cast<FunctionTemplateDecl>(this))
+    return FunctionTemplate->getTemplatedDecl()->declarationReplaces(
+        cast<FunctionTemplateDecl>(OldD)->getTemplatedDecl());
+
+  // Using shadow declarations can be overloaded on their target declarations
+  // if they introduce functions.
+  // FIXME: If our target replaces the old target, can we replace the old
+  // shadow declaration?
+  if (auto *USD = dyn_cast<UsingShadowDecl>(this))
+    if (USD->getTargetDecl() != cast<UsingShadowDecl>(OldD)->getTargetDecl())
+      return false;
+
+  // Using declarations can be overloaded if they introduce functions.
+  if (auto *UD = dyn_cast<UsingDecl>(this)) {
     ASTContext &Context = getASTContext();
-    return Context.getCanonicalNestedNameSpecifier(
-                                     cast<UsingDecl>(this)->getQualifier()) ==
+    return Context.getCanonicalNestedNameSpecifier(UD->getQualifier()) ==
           Context.getCanonicalNestedNameSpecifier(
-                                      cast<UsingDecl>(OldD)->getQualifier());
+              cast<UsingDecl>(OldD)->getQualifier());
   }
-
-  if (isa<UnresolvedUsingValueDecl>(this) &&
-      isa<UnresolvedUsingValueDecl>(OldD)) {
+  if (auto *UUVD = dyn_cast<UnresolvedUsingValueDecl>(this)) {
     ASTContext &Context = getASTContext();
-    return Context.getCanonicalNestedNameSpecifier(
-                      cast<UnresolvedUsingValueDecl>(this)->getQualifier()) ==
+    return Context.getCanonicalNestedNameSpecifier(UUVD->getQualifier()) ==
           Context.getCanonicalNestedNameSpecifier(
               cast<UnresolvedUsingValueDecl>(OldD)->getQualifier());
   }
 
-  // A typedef of an Objective-C class type can replace an Objective-C class
-  // declaration or definition, and vice versa.
-  // FIXME: Is this correct if one of the decls comes from an inline namespace?
-  if ((isa<TypedefNameDecl>(this) && isa<ObjCInterfaceDecl>(OldD)) ||
-      (isa<ObjCInterfaceDecl>(this) && isa<TypedefNameDecl>(OldD)))
-    return true;
+  // UsingDirectiveDecl's are not really NamedDecl's, and all have the same
+  // name. We want to keep it, unless it nominates the same namespace.
+  if (auto *UD = dyn_cast<UsingDirectiveDecl>(this))
+    return UD->getNominatedNamespace()->getOriginalNamespace() ==
+           cast<UsingDirectiveDecl>(OldD)->getNominatedNamespace()
+               ->getOriginalNamespace();
+
+  if (!IsKnownNewer && isRedeclarable(getKind())) {
+    // Check whether this is actually newer than OldD. We want to keep the
+    // newer declaration. This loop will usually only iterate once, because
+    // OldD is usually the previous declaration.
+    for (auto D : redecls()) {
+      if (D == OldD)
+        break;
 
-  // For non-function declarations, if the declarations are of the
-  // same kind and have the same parent then this must be a redeclaration,
-  // or semantic analysis would not have given us the new declaration.
-  // Note that inline namespaces can give us two declarations with the same
-  // name and kind in the same scope but different contexts.
-  return this->getKind() == OldD->getKind() &&
-         this->getDeclContext()->getRedeclContext()->Equals(
-              OldD->getDeclContext()->getRedeclContext());
+      // If we reach the canonical declaration, then OldD is not actually older
+      // than this one.
+      //
+      // FIXME: In this case, we should not add this decl to the lookup table.
+      if (D->isCanonicalDecl())
+        return false;
+    }
+  }
+
+  // It's a newer declaration of the same kind of declaration in the same
+  // scope, and not an overload: we want this decl instead of the existing one.
+  return true;
 }
 
 bool NamedDecl::hasLinkage() const {
@@ -1684,8 +1743,7 @@ QualifierInfo::setTemplateParameterListsInfo(ASTContext &Context,
   if (NumTPLists > 0) {
     TemplParamLists = new (Context) TemplateParameterList*[NumTPLists];
     NumTemplParamLists = NumTPLists;
-    for (unsigned i = NumTPLists; i-- > 0; )
-      TemplParamLists[i] = TPLists[i];
+    std::copy(TPLists, TPLists + NumTPLists, TemplParamLists);
   }
 }
 
@@ -1717,6 +1775,8 @@ VarDecl::VarDecl(Kind DK, ASTContext &C, DeclContext *DC,
                 "VarDeclBitfields too large!");
   static_assert(sizeof(ParmVarDeclBitfields) <= sizeof(unsigned),
                 "ParmVarDeclBitfields too large!");
+  static_assert(sizeof(NonParmVarDeclBitfields) <= sizeof(unsigned),
+                "NonParmVarDeclBitfields too large!");
   AllBits = 0;
   VarDeclBits.SClass = SC;
   // Everything else is implicitly initialized to false.
@@ -1743,9 +1803,12 @@ void VarDecl::setStorageClass(StorageClass SC) {
 VarDecl::TLSKind VarDecl::getTLSKind() const {
   switch (VarDeclBits.TSCSpec) {
   case TSCS_unspecified:
-    if (hasAttr<ThreadAttr>())
-      return TLS_Static;
-    return TLS_None;
+    if (!hasAttr<ThreadAttr>())
+      return TLS_None;
+    return getASTContext().getLangOpts().isCompatibleWithMSVC(
+               LangOptions::MSVC2015)
+               ? TLS_Dynamic
+               : TLS_Static;
   case TSCS___thread: // Fall through.
   case TSCS__Thread_local:
     return TLS_Static;
@@ -1825,9 +1888,8 @@ bool VarDecl::isInExternCXXContext() const {
 
 VarDecl *VarDecl::getCanonicalDecl() { return getFirstDecl(); }
 
-VarDecl::DefinitionKind VarDecl::isThisDeclarationADefinition(
-  ASTContext &C) const
-{
+VarDecl::DefinitionKind
+VarDecl::isThisDeclarationADefinition(ASTContext &C) const {
   // C++ [basic.def]p2:
   //   A declaration is a definition unless [...] it contains the 'extern'
   //   specifier or a linkage-specification and neither an initializer [...],
@@ -1867,6 +1929,10 @@ VarDecl::DefinitionKind VarDecl::isThisDeclarationADefinition(
   if (hasAttr<AliasAttr>())
     return Definition;
 
+  if (const auto *SAA = getAttr<SelectAnyAttr>())
+    if (!SAA->isInherited())
+      return Definition;
+
   // A variable template specialization (other than a static data member
   // template or an explicit specialization) is a declaration until we
   // instantiate its initializer.
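
The isRedeclarable() helper introduced above decides, per Decl::Kind, whether the node type derives from Redeclarable<T>, and it does so with a classic overload-resolution probe rather than a type trait: a pointer to a class derived from Redeclarable<T> binds the template overload, and everything else falls through to the C-style variadic one. A standalone version of the trick, with toy types:

    #include <iostream>

    template <typename T> struct Redeclarable {};
    struct FuncDecl : Redeclarable<FuncDecl> {};  // redeclarable kind
    struct EnumConstDecl {};                      // not redeclarable

    template <typename T> bool isRedeclarableImpl(Redeclarable<T> *) {
      return true;
    }
    bool isRedeclarableImpl(...) { return false; }

    int main() {
      // Overload resolution prefers the derived-to-base pointer conversion
      // when the Redeclarable<T> base exists; otherwise only the ellipsis
      // candidate survives.
      std::cout << isRedeclarableImpl((FuncDecl *)nullptr) << '\n';      // 1
      std::cout << isRedeclarableImpl((EnumConstDecl *)nullptr) << '\n'; // 0
    }
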
@@ -2460,39 +2526,6 @@ bool FunctionDecl::isReplaceableGlobalAllocationFunction() const {
   return RD && isNamed(RD, "nothrow_t") && RD->isInStdNamespace();
 }
 
-FunctionDecl *
-FunctionDecl::getCorrespondingUnsizedGlobalDeallocationFunction() const {
-  ASTContext &Ctx = getASTContext();
-  if (!Ctx.getLangOpts().SizedDeallocation)
-    return nullptr;
-
-  if (getDeclName().getNameKind() != DeclarationName::CXXOperatorName)
-    return nullptr;
-  if (getDeclName().getCXXOverloadedOperator() != OO_Delete &&
-      getDeclName().getCXXOverloadedOperator() != OO_Array_Delete)
-    return nullptr;
-  if (isa<CXXRecordDecl>(getDeclContext()))
-    return nullptr;
-
-  if (!getDeclContext()->getRedeclContext()->isTranslationUnit())
-    return nullptr;
-
-  if (getNumParams() != 2 || isVariadic() ||
-      !Ctx.hasSameType(getType()->castAs<FunctionProtoType>()->getParamType(1),
-                       Ctx.getSizeType()))
-    return nullptr;
-
-  // This is a sized deallocation function. Find the corresponding unsized
-  // deallocation function.
-  lookup_const_result R = getDeclContext()->lookup(getDeclName());
-  for (lookup_const_result::iterator RI = R.begin(), RE = R.end(); RI != RE;
-       ++RI)
-    if (FunctionDecl *FD = dyn_cast<FunctionDecl>(*RI))
-      if (FD->getNumParams() == 1 && !FD->isVariadic())
-        return FD;
-  return nullptr;
-}
-
 LanguageLinkage FunctionDecl::getLanguageLinkage() const {
   return getDeclLanguageLinkage(*this);
 }
@@ -2550,10 +2583,6 @@ FunctionDecl::setPreviousDeclaration(FunctionDecl *PrevDecl) {
     IsInline = true;
 }
 
-const FunctionDecl *FunctionDecl::getCanonicalDecl() const {
-  return getFirstDecl();
-}
-
 FunctionDecl *FunctionDecl::getCanonicalDecl() { return getFirstDecl(); }
 
 /// \brief Returns a value indicating whether this function
@@ -2581,7 +2610,14 @@ unsigned FunctionDecl::getBuiltinID() const {
     // extern "C".
     // FIXME: A recognised library function may not be directly in an extern "C"
     // declaration, for instance "extern "C" { namespace std { decl } }".
-    if (!LinkageDecl || LinkageDecl->getLanguage() != LinkageSpecDecl::lang_c)
+    if (!LinkageDecl) {
+      if (BuiltinID == Builtin::BI__GetExceptionInfo &&
+          Context.getTargetInfo().getCXXABI().isMicrosoft() &&
+          isInStdNamespace())
+        return Builtin::BI__GetExceptionInfo;
+      return 0;
+    }
+    if (LinkageDecl->getLanguage() != LinkageSpecDecl::lang_c)
       return 0;
   }
 
@@ -2796,6 +2832,18 @@ SourceRange FunctionDecl::getReturnTypeSourceRange() const {
   return RTRange;
 }
 
+bool FunctionDecl::hasUnusedResultAttr() const {
+  QualType RetType = getReturnType();
+  if (RetType->isRecordType()) {
+    const CXXRecordDecl *Ret = RetType->getAsCXXRecordDecl();
+    const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(this);
+    if (Ret && Ret->hasAttr<WarnUnusedResultAttr>() &&
+        !(MD && MD->getCorrespondingMethodInClass(Ret, true)))
+      return true;
+  }
+  return hasAttr<WarnUnusedResultAttr>();
+}
+
 /// \brief For an inline function definition in C, or for a gnu_inline function
 /// in C++, determine whether the definition will be externally visible.
 ///
@@ -3077,6 +3125,8 @@ DependentFunctionTemplateSpecializationInfo::
 DependentFunctionTemplateSpecializationInfo(const UnresolvedSetImpl &Ts,
                                             const TemplateArgumentListInfo &TArgs)
   : AngleLocs(TArgs.getLAngleLoc(), TArgs.getRAngleLoc()) {
+  static_assert(sizeof(*this) % llvm::AlignOf<void *>::Alignment == 0,
+                "Trailing data is unaligned!");
   d.NumTemplates = Ts.size();
   d.NumArgs = TArgs.size();
 
@@ -3590,7 +3640,7 @@ void RecordDecl::completeDefinition() {
 /// This can be turned on with an attribute, pragma, or the
 /// -mms-bitfields command-line option.
 bool RecordDecl::isMsStruct(const ASTContext &C) const {
-  return hasAttr<MsStructAttr>() || C.getLangOpts().MSBitfields == 1;
+  return hasAttr<MSStructAttr>() || C.getLangOpts().MSBitfields == 1;
 }
 
 static bool isFieldOrIndirectField(Decl::Kind K) {
@@ -3747,6 +3797,13 @@ TranslationUnitDecl *TranslationUnitDecl::Create(ASTContext &C) {
   return new (C, (DeclContext *)nullptr) TranslationUnitDecl(C);
 }
 
+void ExternCContextDecl::anchor() { }
+
+ExternCContextDecl *ExternCContextDecl::Create(const ASTContext &C,
+                                               TranslationUnitDecl *DC) {
+  return new (C, DC) ExternCContextDecl(DC);
+}
+
 void LabelDecl::anchor() { }
 
 LabelDecl *LabelDecl::Create(ASTContext &C, DeclContext *DC,
@@ -3885,6 +3942,21 @@ TypedefDecl *TypedefDecl::Create(ASTContext &C, DeclContext *DC,
 
 void TypedefNameDecl::anchor() { }
 
+TagDecl *TypedefNameDecl::getAnonDeclWithTypedefName(bool AnyRedecl) const {
+  if (auto *TT = getTypeSourceInfo()->getType()->getAs<TagType>()) {
+    auto *OwningTypedef = TT->getDecl()->getTypedefNameForAnonDecl();
+    auto *ThisTypedef = this;
+    if (AnyRedecl && OwningTypedef) {
+      OwningTypedef = OwningTypedef->getCanonicalDecl();
+      ThisTypedef = ThisTypedef->getCanonicalDecl();
+    }
+    if (OwningTypedef == ThisTypedef)
+      return TT->getDecl();
+  }
+
+  return nullptr;
+}
+
 TypedefDecl *TypedefDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
   return new (C, ID) TypedefDecl(C, nullptr, SourceLocation(), SourceLocation(),
                                  nullptr, nullptr);
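
The static_assert bolted onto DependentFunctionTemplateSpecializationInfo above (and onto DeclGroup::Create later in this diff) guards classes that allocate an array immediately after the object itself: if sizeof of the header type were not a multiple of the pointer alignment, the trailing pointers would be misaligned. A minimal sketch of the trailing-storage pattern the assertion protects, using a hypothetical Header type:

    #include <cstddef>
    #include <new>

    struct Header {
      std::size_t NumElems;
      // A 'void *[NumElems]' array lives directly after the Header object.
      void **elems() { return reinterpret_cast<void **>(this + 1); }
    };

    static_assert(sizeof(Header) % alignof(void *) == 0,
                  "trailing pointers would be misaligned");

    Header *create(std::size_t N) {
      void *Mem = ::operator new(sizeof(Header) + N * sizeof(void *));
      Header *H = new (Mem) Header{N};
      for (std::size_t I = 0; I != N; ++I)
        H->elems()[I] = nullptr;
      return H;
    }
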
diff --git a/contrib/llvm/tools/clang/lib/AST/DeclBase.cpp b/contrib/llvm/tools/clang/lib/AST/DeclBase.cpp
index a46787f..79cadcf 100644
--- a/contrib/llvm/tools/clang/lib/AST/DeclBase.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/DeclBase.cpp
@@ -66,6 +66,12 @@ void *Decl::operator new(std::size_t Size, const ASTContext &Context,
 void *Decl::operator new(std::size_t Size, const ASTContext &Ctx,
                          DeclContext *Parent, std::size_t Extra) {
   assert(!Parent || &Parent->getParentASTContext() == &Ctx);
+  // With local visibility enabled, we track the owning module even for local
+  // declarations.
+  if (Ctx.getLangOpts().ModulesLocalVisibility) {
+    void *Buffer = ::operator new(sizeof(Module *) + Size + Extra, Ctx);
+    return new (Buffer) Module*(nullptr) + 1;
+  }
   return ::operator new(Size + Extra, Ctx);
 }
 
@@ -336,20 +342,34 @@ bool Decl::isReferenced() const {
 static AvailabilityResult CheckAvailability(ASTContext &Context,
                                             const AvailabilityAttr *A,
                                             std::string *Message) {
-  StringRef TargetPlatform = Context.getTargetInfo().getPlatformName();
-  StringRef PrettyPlatformName
-    = AvailabilityAttr::getPrettyPlatformName(TargetPlatform);
-  if (PrettyPlatformName.empty())
-    PrettyPlatformName = TargetPlatform;
+  VersionTuple TargetMinVersion =
+      Context.getTargetInfo().getPlatformMinVersion();
 
-  VersionTuple TargetMinVersion = Context.getTargetInfo().getPlatformMinVersion();
   if (TargetMinVersion.empty())
     return AR_Available;
 
+  // Check if this is an App Extension "platform", and if so chop off
+  // the suffix for matching with the actual platform.
+  StringRef ActualPlatform = A->getPlatform()->getName();
+  StringRef RealizedPlatform = ActualPlatform;
+  if (Context.getLangOpts().AppExt) {
+    size_t suffix = RealizedPlatform.rfind("_app_extension");
+    if (suffix != StringRef::npos)
+      RealizedPlatform = RealizedPlatform.slice(0, suffix);
+  }
+
+  StringRef TargetPlatform = Context.getTargetInfo().getPlatformName();
+
   // Match the platform name.
-  if (A->getPlatform()->getName() != TargetPlatform)
+  if (RealizedPlatform != TargetPlatform)
     return AR_Available;
-
+
+  StringRef PrettyPlatformName
+    = AvailabilityAttr::getPrettyPlatformName(ActualPlatform);
+
+  if (PrettyPlatformName.empty())
+    PrettyPlatformName = ActualPlatform;
+
   std::string HintMessage;
   if (!A->getMessage().empty()) {
     HintMessage = " - ";
@@ -583,6 +603,7 @@ unsigned Decl::getIdentifierNamespaceForKind(Kind DeclKind) {
     case Block:
     case Captured:
     case TranslationUnit:
+    case ExternCContext:
 
     case UsingDirective:
     case ClassTemplateSpecialization:
@@ -846,6 +867,10 @@ bool DeclContext::isDependentContext() const {
       return getLexicalParent()->isDependentContext();
   }
 
+  // FIXME: A variable template is a dependent context, but is not a
+  // DeclContext. A context within it (such as a lambda-expression)
+  // should be considered dependent.
+
   return getParent() && getParent()->isDependentContext();
 }
 
@@ -889,6 +914,7 @@ bool DeclContext::Encloses(const DeclContext *DC) const {
 DeclContext *DeclContext::getPrimaryContext() {
   switch (DeclKind) {
   case Decl::TranslationUnit:
+  case Decl::ExternCContext:
   case Decl::LinkageSpec:
   case Decl::Block:
   case Decl::Captured:
@@ -991,23 +1017,24 @@ DeclContext::BuildDeclChain(ArrayRef<Decl*> Decls,
 /// built a lookup map. For every name in the map, pull in the new names from
 /// the external storage.
 void DeclContext::reconcileExternalVisibleStorage() const {
-  assert(NeedToReconcileExternalVisibleStorage && LookupPtr.getPointer());
+  assert(NeedToReconcileExternalVisibleStorage && LookupPtr);
   NeedToReconcileExternalVisibleStorage = false;
 
-  for (auto &Lookup : *LookupPtr.getPointer())
+  for (auto &Lookup : *LookupPtr)
     Lookup.second.setHasExternalDecls();
 }
 
 /// \brief Load the declarations within this lexical storage from an
 /// external source.
-void
+/// \return \c true if any declarations were added.
+bool
 DeclContext::LoadLexicalDeclsFromExternalStorage() const {
   ExternalASTSource *Source = getParentASTContext().getExternalSource();
   assert(hasExternalLexicalStorage() && Source && "No external storage?");
 
   // Notify that we have a DeclContext that is initializing.
   ExternalASTSource::Deserializing ADeclContext(Source);
-
+
   // Load the external declarations, if any.
   SmallVector<Decl*, 64> Decls;
   ExternalLexicalStorage = false;
@@ -1017,11 +1044,11 @@ DeclContext::LoadLexicalDeclsFromExternalStorage() const {
 
   case ELR_Failure:
   case ELR_AlreadyLoaded:
-    return;
+    return false;
   }
 
   if (Decls.empty())
-    return;
+    return false;
 
   // We may have already loaded just the fields of this record, in which case
   // we need to ignore them.
@@ -1038,6 +1065,7 @@ DeclContext::LoadLexicalDeclsFromExternalStorage() const {
     FirstDecl = ExternalFirst;
   if (!LastDecl)
     LastDecl = ExternalLast;
+  return true;
 }
 
 DeclContext::lookup_result
@@ -1045,7 +1073,7 @@ ExternalASTSource::SetNoExternalVisibleDeclsForName(const DeclContext *DC,
                                                     DeclarationName Name) {
   ASTContext &Context = DC->getParentASTContext();
   StoredDeclsMap *Map;
-  if (!(Map = DC->LookupPtr.getPointer()))
+  if (!(Map = DC->LookupPtr))
     Map = DC->CreateStoredDeclsMap(Context);
   if (DC->NeedToReconcileExternalVisibleStorage)
     DC->reconcileExternalVisibleStorage();
@@ -1061,7 +1089,7 @@ ExternalASTSource::SetExternalVisibleDeclsForName(const DeclContext *DC,
                                                   ArrayRef<NamedDecl*> Decls) {
   ASTContext &Context = DC->getParentASTContext();
   StoredDeclsMap *Map;
-  if (!(Map = DC->LookupPtr.getPointer()))
+  if (!(Map = DC->LookupPtr))
     Map = DC->CreateStoredDeclsMap(Context);
   if (DC->NeedToReconcileExternalVisibleStorage)
     DC->reconcileExternalVisibleStorage();
@@ -1078,7 +1106,7 @@ ExternalASTSource::SetExternalVisibleDeclsForName(const DeclContext *DC,
   // first.
   llvm::SmallVector<unsigned, 8> Skip;
   for (unsigned I = 0, N = Decls.size(); I != N; ++I)
-    if (List.HandleRedeclaration(Decls[I]))
+    if (List.HandleRedeclaration(Decls[I], /*IsKnownNewer*/false))
       Skip.push_back(I);
   Skip.push_back(Decls.size());
 
@@ -1155,7 +1183,7 @@ void DeclContext::removeDecl(Decl *D) {
     // Remove only decls that have a name
     if (!ND->getDeclName()) return;
 
-    StoredDeclsMap *Map = getPrimaryContext()->LookupPtr.getPointer();
+    StoredDeclsMap *Map = getPrimaryContext()->LookupPtr;
     if (!Map) return;
 
     StoredDeclsMap::iterator Pos = Map->find(ND->getDeclName());
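
The buildLookup() rewrite just below splits the old single "lazy" bit into two flags: one for local decls not yet folded into the table, and one for lexical decls not yet loaded from the AST file. A hypothetical skeleton of that control flow, with stand-in helpers in place of the AST reader and of buildLookupImpl:

    // Hypothetical skeleton of the two-flag lazy build; not clang API.
    struct LookupTableState {
      bool HasLazyLocalLexicalLookups = false;
      bool HasLazyExternalLexicalLookups = false;

      bool loadExternalDecls() { return false; }  // stand-in for deserialization
      void indexLocalDecls() {}                   // stand-in for buildLookupImpl

      void buildLookup() {
        if (!HasLazyLocalLexicalLookups && !HasLazyExternalLexicalLookups)
          return;                                 // table is already complete
        if (HasLazyExternalLexicalLookups) {
          HasLazyExternalLexicalLookups = false;
          // Deserializing may or may not add decls that still need indexing.
          HasLazyLocalLexicalLookups |= loadExternalDecls();
          if (!HasLazyLocalLexicalLookups)
            return;
        }
        indexLocalDecls();
        HasLazyLocalLexicalLookups = false;
      }
    };

The payoff of the split is visible in noload_lookup(), which can flush the local flag without ever touching the external one.
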
@@ -1243,32 +1271,38 @@ static bool shouldBeHidden(NamedDecl *D) {
 StoredDeclsMap *DeclContext::buildLookup() {
   assert(this == getPrimaryContext() && "buildLookup called on non-primary DC");
 
-  // FIXME: Should we keep going if hasExternalVisibleStorage?
-  if (!LookupPtr.getInt())
-    return LookupPtr.getPointer();
+  if (!HasLazyLocalLexicalLookups && !HasLazyExternalLexicalLookups)
+    return LookupPtr;
 
   SmallVector<DeclContext *, 2> Contexts;
   collectAllContexts(Contexts);
-  for (unsigned I = 0, N = Contexts.size(); I != N; ++I)
-    buildLookupImpl<&DeclContext::decls_begin,
-                    &DeclContext::decls_end>(Contexts[I]);
+
+  if (HasLazyExternalLexicalLookups) {
+    HasLazyExternalLexicalLookups = false;
+    for (auto *DC : Contexts) {
+      if (DC->hasExternalLexicalStorage())
+        HasLazyLocalLexicalLookups |=
+            DC->LoadLexicalDeclsFromExternalStorage();
+    }
+
+    if (!HasLazyLocalLexicalLookups)
+      return LookupPtr;
+  }
+
+  for (auto *DC : Contexts)
+    buildLookupImpl(DC, hasExternalVisibleStorage());
 
   // We no longer have any lazy decls.
-  LookupPtr.setInt(false);
-  return LookupPtr.getPointer();
+  HasLazyLocalLexicalLookups = false;
+  return LookupPtr;
 }
 
 /// buildLookupImpl - Build part of the lookup data structure for the
 /// declarations contained within DCtx, which will either be this
 /// DeclContext, a DeclContext linked to it, or a transparent context
 /// nested within it.
-template<DeclContext::decl_iterator (DeclContext::*Begin)() const,
-         DeclContext::decl_iterator (DeclContext::*End)() const>
-void DeclContext::buildLookupImpl(DeclContext *DCtx) {
-  for (decl_iterator I = (DCtx->*Begin)(), E = (DCtx->*End)();
-       I != E; ++I) {
-    Decl *D = *I;
-
+void DeclContext::buildLookupImpl(DeclContext *DCtx, bool Internal) {
+  for (Decl *D : DCtx->noload_decls()) {
     // Insert this declaration into the lookup structure, but only if
     // it's semantically within its decl context.  Any other decls which
     // should be found in this context are added eagerly.
@@ -1282,39 +1316,46 @@ void DeclContext::buildLookupImpl(DeclContext *DCtx, bool Internal) {
         (!ND->isFromASTFile() ||
          (isTranslationUnit() &&
           !getParentASTContext().getLangOpts().CPlusPlus)))
-      makeDeclVisibleInContextImpl(ND, false);
+      makeDeclVisibleInContextImpl(ND, Internal);
 
     // If this declaration is itself a transparent declaration context
     // or inline namespace, add the members of this declaration of that
     // context (recursively).
     if (DeclContext *InnerCtx = dyn_cast<DeclContext>(D))
       if (InnerCtx->isTransparentContext() || InnerCtx->isInlineNamespace())
-        buildLookupImpl<Begin, End>(InnerCtx);
+        buildLookupImpl(InnerCtx, Internal);
   }
 }
 
+NamedDecl *const DeclContextLookupResult::SingleElementDummyList = nullptr;
+
 DeclContext::lookup_result
-DeclContext::lookup(DeclarationName Name) {
+DeclContext::lookup(DeclarationName Name) const {
   assert(DeclKind != Decl::LinkageSpec &&
          "Should not perform lookups into linkage specs!");
 
-  DeclContext *PrimaryContext = getPrimaryContext();
+  const DeclContext *PrimaryContext = getPrimaryContext();
   if (PrimaryContext != this)
     return PrimaryContext->lookup(Name);
 
-  // If this is a namespace, ensure that any later redeclarations of it have
-  // been loaded, since they may add names to the result of this lookup.
-  if (auto *ND = dyn_cast<NamespaceDecl>(this))
-    (void)ND->getMostRecentDecl();
+  // If we have an external source, ensure that any later redeclarations of this
+  // context have been loaded, since they may add names to the result of this
+  // lookup (or add external visible storage).
+  ExternalASTSource *Source = getParentASTContext().getExternalSource();
+  if (Source)
+    (void)cast<Decl>(this)->getMostRecentDecl();
 
   if (hasExternalVisibleStorage()) {
+    assert(Source && "external visible storage but no external source?");
+
     if (NeedToReconcileExternalVisibleStorage)
       reconcileExternalVisibleStorage();
 
-    StoredDeclsMap *Map = LookupPtr.getPointer();
+    StoredDeclsMap *Map = LookupPtr;
 
-    if (LookupPtr.getInt())
-      Map = buildLookup();
+    if (HasLazyLocalLexicalLookups || HasLazyExternalLexicalLookups)
+      // FIXME: Make buildLookup const?
+      Map = const_cast<DeclContext*>(this)->buildLookup();
 
     if (!Map)
       Map = CreateStoredDeclsMap(getParentASTContext());
@@ -1325,28 +1366,27 @@ DeclContext::lookup(DeclarationName Name) {
     if (!R.second && !R.first->second.hasExternalDecls())
       return R.first->second.getLookupResult();
 
-    ExternalASTSource *Source = getParentASTContext().getExternalSource();
     if (Source->FindExternalVisibleDeclsByName(this, Name) || !R.second) {
-      if (StoredDeclsMap *Map = LookupPtr.getPointer()) {
+      if (StoredDeclsMap *Map = LookupPtr) {
         StoredDeclsMap::iterator I = Map->find(Name);
         if (I != Map->end())
          return I->second.getLookupResult();
       }
     }
 
-    return lookup_result(lookup_iterator(nullptr), lookup_iterator(nullptr));
+    return lookup_result();
   }
 
-  StoredDeclsMap *Map = LookupPtr.getPointer();
-  if (LookupPtr.getInt())
-    Map = buildLookup();
+  StoredDeclsMap *Map = LookupPtr;
+  if (HasLazyLocalLexicalLookups || HasLazyExternalLexicalLookups)
+    Map = const_cast<DeclContext*>(this)->buildLookup();
 
   if (!Map)
-    return lookup_result(lookup_iterator(nullptr), lookup_iterator(nullptr));
+    return lookup_result();
 
   StoredDeclsMap::iterator I = Map->find(Name);
   if (I == Map->end())
-    return lookup_result(lookup_iterator(nullptr), lookup_iterator(nullptr));
+    return lookup_result();
 
   return I->second.getLookupResult();
 }
@@ -1355,40 +1395,29 @@ DeclContext::lookup_result
 DeclContext::noload_lookup(DeclarationName Name) {
   assert(DeclKind != Decl::LinkageSpec &&
         "Should not perform lookups into linkage specs!");
-  if (!hasExternalVisibleStorage())
-    return lookup(Name);
 
   DeclContext *PrimaryContext = getPrimaryContext();
   if (PrimaryContext != this)
     return PrimaryContext->noload_lookup(Name);
 
-  StoredDeclsMap *Map = LookupPtr.getPointer();
-  if (LookupPtr.getInt()) {
-    // Carefully build the lookup map, without deserializing anything.
+  // If we have any lazy lexical declarations not in our lookup map, add them
+  // now. Don't import any external declarations, not even if we know we have
+  // some missing from the external visible lookups.
+  if (HasLazyLocalLexicalLookups) {
     SmallVector<DeclContext *, 2> Contexts;
     collectAllContexts(Contexts);
     for (unsigned I = 0, N = Contexts.size(); I != N; ++I)
-      buildLookupImpl<&DeclContext::noload_decls_begin,
-                      &DeclContext::noload_decls_end>(Contexts[I]);
-
-    // We no longer have any lazy decls.
-    LookupPtr.setInt(false);
-
-    // There may now be names for which we have local decls but are
-    // missing the external decls. FIXME: Just set the hasExternalDecls
-    // flag on those names that have external decls.
-    NeedToReconcileExternalVisibleStorage = true;
-
-    Map = LookupPtr.getPointer();
+      buildLookupImpl(Contexts[I], hasExternalVisibleStorage());
+    HasLazyLocalLexicalLookups = false;
   }
 
+  StoredDeclsMap *Map = LookupPtr;
   if (!Map)
-    return lookup_result(lookup_iterator(nullptr), lookup_iterator(nullptr));
+    return lookup_result();
 
   StoredDeclsMap::iterator I = Map->find(Name);
   return I != Map->end() ? I->second.getLookupResult()
-                         : lookup_result(lookup_iterator(nullptr),
-                                         lookup_iterator(nullptr));
+                         : lookup_result();
 }
 
 void DeclContext::localUncachedLookup(DeclarationName Name,
@@ -1404,8 +1433,9 @@ void DeclContext::localUncachedLookup(DeclarationName Name,
   }
 
   // If we have a lookup table, check there first. Maybe we'll get lucky.
-  if (Name && !LookupPtr.getInt()) {
-    if (StoredDeclsMap *Map = LookupPtr.getPointer()) {
+  // FIXME: Should we be checking these flags on the primary context?
+  if (Name && !HasLazyLocalLexicalLookups && !HasLazyExternalLexicalLookups) {
+    if (StoredDeclsMap *Map = LookupPtr) {
       StoredDeclsMap::iterator Pos = Map->find(Name);
       if (Pos != Map->end()) {
         Results.insert(Results.end(),
@@ -1418,6 +1448,8 @@ void DeclContext::localUncachedLookup(DeclarationName Name,
 
   // Slow case: grovel through the declarations in our chain looking for
   // matches.
+  // FIXME: If we have lazy external declarations, this will not find them!
+  // FIXME: Should we CollectAllContexts and walk them all here?
   for (Decl *D = FirstDecl; D; D = D->getNextDeclInContext()) {
     if (NamedDecl *ND = dyn_cast<NamedDecl>(D))
       if (ND->getDeclName() == Name)
@@ -1498,7 +1530,7 @@ void DeclContext::makeDeclVisibleInContextWithFlags(NamedDecl *D, bool Internal,
   // FIXME: As a performance hack, don't add such decls into the translation
   // unit unless we're in C++, since qualified lookup into the TU is never
   // performed.
-  if (LookupPtr.getPointer() || hasExternalVisibleStorage() ||
+  if (LookupPtr || hasExternalVisibleStorage() ||
      ((!Recoverable || D->getDeclContext() != D->getLexicalDeclContext()) &&
       (getParentASTContext().getLangOpts().CPlusPlus ||
        !isTranslationUnit()))) {
@@ -1508,7 +1540,7 @@ void DeclContext::makeDeclVisibleInContextWithFlags(NamedDecl *D, bool Internal,
       buildLookup();
     makeDeclVisibleInContextImpl(D, Internal);
   } else {
-    LookupPtr.setInt(true);
+    HasLazyLocalLexicalLookups = true;
   }
 
   // If we are a transparent context or inline namespace, insert into our
@@ -1526,7 +1558,7 @@ void DeclContext::makeDeclVisibleInContextWithFlags(NamedDecl *D, bool Internal,
 
 void DeclContext::makeDeclVisibleInContextImpl(NamedDecl *D, bool Internal) {
   // Find or create the stored declaration map.
-  StoredDeclsMap *Map = LookupPtr.getPointer();
+  StoredDeclsMap *Map = LookupPtr;
   if (!Map) {
     ASTContext *C = &getParentASTContext();
     Map = CreateStoredDeclsMap(*C);
@@ -1555,12 +1587,12 @@ void DeclContext::makeDeclVisibleInContextImpl(NamedDecl *D, bool Internal) {
     return;
   }
 
-  else if (DeclNameEntries.isNull()) {
+  if (DeclNameEntries.isNull()) {
    DeclNameEntries.setOnlyValue(D);
    return;
  }
 
-  if (DeclNameEntries.HandleRedeclaration(D)) {
+  if (DeclNameEntries.HandleRedeclaration(D, /*IsKnownNewer*/!Internal)) {
     // This declaration has replaced an existing one for which
     // declarationReplaces returns true.
     return;
@@ -1570,15 +1602,17 @@ void DeclContext::makeDeclVisibleInContextImpl(NamedDecl *D, bool Internal) {
   DeclNameEntries.AddSubsequentDecl(D);
 }
 
+UsingDirectiveDecl *DeclContext::udir_iterator::operator*() const {
+  return cast<UsingDirectiveDecl>(*I);
+}
+
 /// Returns iterator range [First, Last) of UsingDirectiveDecls stored within
 /// this context.
 DeclContext::udir_range DeclContext::using_directives() const {
   // FIXME: Use something more efficient than normal lookup for using
   // directives. In C++, using directives are looked up more than anything else.
-  lookup_const_result Result = lookup(UsingDirectiveDecl::getName());
-  return udir_range(
-      reinterpret_cast<UsingDirectiveDecl *const *>(Result.begin()),
-      reinterpret_cast<UsingDirectiveDecl *const *>(Result.end()));
+  lookup_result Result = lookup(UsingDirectiveDecl::getName());
+  return udir_range(Result.begin(), Result.end());
 }
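
makeDeclVisibleInContextImpl above funnels every insertion through the map entry's replace-or-append decision; the new IsKnownNewer flag merely lets declarationReplaces skip walking the redeclaration chain when insertion order already proves recency. A much-reduced model of that entry type; the real StoredDeclsList packs the single-decl case into a pointer union, and the stand-in predicate below is far simpler than the real one:

    #include <vector>

    struct Decl { int Kind; };

    // Stand-in for NamedDecl::declarationReplaces(); the real predicate also
    // checks contexts, overload status, and redeclaration chains.
    static bool declarationReplaces(const Decl &New, const Decl &Old) {
      return New.Kind == Old.Kind;
    }

    struct StoredDeclsListModel {
      std::vector<Decl *> Decls;

      bool handleRedeclaration(Decl *D) {
        for (Decl *&Existing : Decls)
          if (declarationReplaces(*D, *Existing)) {
            Existing = D;  // the newer decl shadows the one it replaces
            return true;
          }
        return false;
      }

      void addDecl(Decl *D) {
        if (!handleRedeclaration(D))
          Decls.push_back(D);  // genuinely new, e.g. another overload
      }
    };
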
 
 //===----------------------------------------------------------------------===//
@@ -1586,7 +1620,7 @@ DeclContext::udir_range DeclContext::using_directives() const {
 //===----------------------------------------------------------------------===//
 
 StoredDeclsMap *DeclContext::CreateStoredDeclsMap(ASTContext &C) const {
-  assert(!LookupPtr.getPointer() && "context already has a decls map");
+  assert(!LookupPtr && "context already has a decls map");
   assert(getPrimaryContext() == this &&
          "creating decls map on non-primary context");
 
@@ -1598,7 +1632,7 @@ StoredDeclsMap *DeclContext::CreateStoredDeclsMap(ASTContext &C) const {
     M = new StoredDeclsMap();
   M->Previous = C.LastSDM;
   C.LastSDM = llvm::PointerIntPair<StoredDeclsMap*,1>(M, Dependent);
-  LookupPtr.setPointer(M);
+  LookupPtr = M;
   return M;
 }
 
@@ -1630,11 +1664,11 @@ DependentDiagnostic *DependentDiagnostic::Create(ASTContext &C,
   assert(Parent->isDependentContext()
          && "cannot iterate dependent diagnostics of non-dependent context");
   Parent = Parent->getPrimaryContext();
-  if (!Parent->LookupPtr.getPointer())
+  if (!Parent->LookupPtr)
     Parent->CreateStoredDeclsMap(C);
 
-  DependentStoredDeclsMap *Map
-    = static_cast<DependentStoredDeclsMap*>(Parent->LookupPtr.getPointer());
+  DependentStoredDeclsMap *Map =
+      static_cast<DependentStoredDeclsMap *>(Parent->LookupPtr);
 
   // Allocate the copy of the PartialDiagnostic via the ASTContext's
   // BumpPtrAllocator, rather than the ASTContext itself.
diff --git a/contrib/llvm/tools/clang/lib/AST/DeclCXX.cpp b/contrib/llvm/tools/clang/lib/AST/DeclCXX.cpp
index 9af8c4b..8dc62dd 100644
--- a/contrib/llvm/tools/clang/lib/AST/DeclCXX.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/DeclCXX.cpp
@@ -991,7 +991,7 @@ CXXMethodDecl* CXXRecordDecl::getLambdaCallOperator() const {
   if (!isLambda()) return nullptr;
   DeclarationName Name =
     getASTContext().DeclarationNames.getCXXOperatorName(OO_Call);
-  DeclContext::lookup_const_result Calls = lookup(Name);
+  DeclContext::lookup_result Calls = lookup(Name);
 
   assert(!Calls.empty() && "Missing lambda call operator!");
   assert(Calls.size() == 1 && "More than one lambda call operator!");
@@ -1008,7 +1008,7 @@ CXXMethodDecl* CXXRecordDecl::getLambdaStaticInvoker() const {
   if (!isLambda()) return nullptr;
   DeclarationName Name =
     &getASTContext().Idents.get(getLambdaStaticInvokerName());
-  DeclContext::lookup_const_result Invoker = lookup(Name);
+  DeclContext::lookup_result Invoker = lookup(Name);
   if (Invoker.empty()) return nullptr;
   assert(Invoker.size() == 1 && "More than one static invoker operator!");
   NamedDecl *InvokerFun = Invoker.front();
@@ -1173,7 +1173,7 @@ static void CollectVisibleConversions(ASTContext &Context,
 /// getVisibleConversionFunctions - get all conversion functions visible
 /// in current class; including conversion function templates.
-std::pair<CXXRecordDecl::conversion_iterator,CXXRecordDecl::conversion_iterator>
+llvm::iterator_range<CXXRecordDecl::conversion_iterator>
 CXXRecordDecl::getVisibleConversionFunctions() {
   ASTContext &Ctx = getASTContext();
 
@@ -1189,7 +1189,7 @@ CXXRecordDecl::getVisibleConversionFunctions() {
       data().ComputedVisibleConversions = true;
     }
   }
-  return std::make_pair(Set->begin(), Set->end());
+  return llvm::make_range(Set->begin(), Set->end());
 }
 
 void CXXRecordDecl::removeConversion(const NamedDecl *ConvDecl) {
@@ -1307,7 +1307,7 @@ CXXDestructorDecl *CXXRecordDecl::getDestructor() const {
     = Context.DeclarationNames.getCXXDestructorName(
                                           Context.getCanonicalType(ClassType));
 
-  DeclContext::lookup_const_result R = lookup(Name);
+  DeclContext::lookup_result R = lookup(Name);
   if (R.empty())
     return nullptr;
 
@@ -1418,9 +1418,8 @@ CXXMethodDecl::getCorrespondingMethodInClass(const CXXRecordDecl *RD,
     return nullptr;
   }
 
-  lookup_const_result Candidates = RD->lookup(getDeclName());
-  for (NamedDecl * const * I = Candidates.begin(); I != Candidates.end(); ++I) {
-    CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(*I);
+  for (auto *ND : RD->lookup(getDeclName())) {
+    CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(ND);
     if (!MD)
       continue;
     if (recursivelyOverrides(MD, this))
@@ -1491,8 +1490,8 @@ bool CXXMethodDecl::isUsualDeallocationFunction() const {
   // This function is a usual deallocation function if there are no
   // single-parameter deallocation functions of the same kind.
-  DeclContext::lookup_const_result R = getDeclContext()->lookup(getDeclName());
-  for (DeclContext::lookup_const_result::iterator I = R.begin(), E = R.end();
+  DeclContext::lookup_result R = getDeclContext()->lookup(getDeclName());
+  for (DeclContext::lookup_result::iterator I = R.begin(), E = R.end();
        I != E; ++I) {
     if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(*I))
       if (FD->getNumParams() == 1)
@@ -1740,6 +1739,10 @@ CXXConstructorDecl::Create(ASTContext &C, CXXRecordDecl *RD,
                                     isImplicitlyDeclared, isConstexpr);
 }
 
+CXXConstructorDecl::init_const_iterator CXXConstructorDecl::init_begin() const {
+  return CtorInitializers.get(getASTContext().getExternalSource());
+}
+
 CXXConstructorDecl *CXXConstructorDecl::getTargetConstructor() const {
   assert(isDelegatingConstructor() && "Not a delegating constructor!");
   Expr *E = (*init_begin())->getInit()->IgnoreImplicit();
@@ -1887,6 +1890,15 @@ CXXDestructorDecl::Create(ASTContext &C, CXXRecordDecl *RD,
                                       isInline, isImplicitlyDeclared);
 }
 
+void CXXDestructorDecl::setOperatorDelete(FunctionDecl *OD) {
+  auto *First = cast<CXXDestructorDecl>(getFirstDecl());
+  if (OD && !First->OperatorDelete) {
+    First->OperatorDelete = OD;
+    if (auto *L = getASTMutationListener())
+      L->ResolvedOperatorDelete(First, OD);
+  }
+}
+
 void CXXConversionDecl::anchor() { }
 
 CXXConversionDecl *
diff --git a/contrib/llvm/tools/clang/lib/AST/DeclGroup.cpp b/contrib/llvm/tools/clang/lib/AST/DeclGroup.cpp
index 9861f22..512837f 100644
--- a/contrib/llvm/tools/clang/lib/AST/DeclGroup.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/DeclGroup.cpp
@@ -18,6 +18,8 @@ using namespace clang;
 
 DeclGroup* DeclGroup::Create(ASTContext &C, Decl **Decls, unsigned NumDecls) {
+  static_assert(sizeof(DeclGroup) % llvm::AlignOf<void *>::Alignment == 0,
+                "Trailing data is unaligned!");
   assert(NumDecls > 1 && "Invalid DeclGroup");
   unsigned Size = sizeof(DeclGroup) + sizeof(Decl*) * NumDecls;
   void* Mem = C.Allocate(Size, llvm::AlignOf<DeclGroup>::Alignment);
diff --git a/contrib/llvm/tools/clang/lib/AST/DeclObjC.cpp b/contrib/llvm/tools/clang/lib/AST/DeclObjC.cpp
index ed53675..a63ba7e 100644
--- a/contrib/llvm/tools/clang/lib/AST/DeclObjC.cpp
+++ b/contrib/llvm/tools/clang/lib/AST/DeclObjC.cpp
@@ -54,8 +54,8 @@ void ObjCContainerDecl::anchor() { }
 ///
 ObjCIvarDecl *
 ObjCContainerDecl::getIvarDecl(IdentifierInfo *Id) const {
-  lookup_const_result R = lookup(Id);
-  for (lookup_const_iterator Ivar = R.begin(), IvarEnd = R.end();
+  lookup_result R = lookup(Id);
+  for (lookup_iterator Ivar = R.begin(), IvarEnd = R.end();
        Ivar != IvarEnd; ++Ivar) {
     if (ObjCIvarDecl *ivar = dyn_cast<ObjCIvarDecl>(*Ivar))
       return ivar;
@@ -83,8 +83,8 @@ ObjCContainerDecl::getMethod(Selector Sel, bool isInstance,
   //   + (float) class_method;
   // @end
   //
-  lookup_const_result R = lookup(Sel);
-  for (lookup_const_iterator Meth = R.begin(), MethEnd = R.end();
+  lookup_result R = lookup(Sel);
+  for (lookup_iterator Meth = R.begin(), MethEnd = R.end();
        Meth != MethEnd; ++Meth) {
     ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(*Meth);
     if (MD && MD->isInstanceMethod() == isInstance)
@@ -101,8 +101,8 @@ ObjCContainerDecl::getMethod(Selector Sel, bool isInstance,
 bool ObjCContainerDecl::HasUserDeclaredSetterMethod(
     const ObjCPropertyDecl *Property) const {
   Selector Sel = Property->getSetterName();
-  lookup_const_result R = lookup(Sel);
-  for (lookup_const_iterator Meth = R.begin(), MethEnd = R.end();
+  lookup_result R = lookup(Sel);
+  for (lookup_iterator Meth = R.begin(), MethEnd = R.end();
        Meth != MethEnd; ++Meth) {
     ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(*Meth);
     if (MD && MD->isInstanceMethod() && !MD->isImplicit())
@@ -161,8 +161,8 @@ ObjCPropertyDecl::findPropertyDecl(const DeclContext *DC,
     return nullptr;
   }
 
-  DeclContext::lookup_const_result R = DC->lookup(propertyID);
-  for (DeclContext::lookup_const_iterator I = R.begin(), E = R.end(); I != E;
+  DeclContext::lookup_result R = DC->lookup(propertyID);
+  for (DeclContext::lookup_iterator I = R.begin(), E = R.end(); I != E;
        ++I)
     if (ObjCPropertyDecl *PD = dyn_cast<ObjCPropertyDecl>(*I))
       return PD;
@@ -334,9 +334,8 @@ void ObjCInterfaceDecl::mergeClassExtensionProtocolList(
     return;
 
   // Merge ProtocolRefs into the class's protocol list.
-  for (auto *P : all_referenced_protocols()) {
-    ProtocolRefs.push_back(P);
-  }
+  ProtocolRefs.append(all_referenced_protocol_begin(),
+                      all_referenced_protocol_end());
 
   data().AllReferencedProtocols.set(ProtocolRefs.data(), ProtocolRefs.size(),C);
 }
@@ -617,8 +616,7 @@ ObjCMethodDecl *ObjCInterfaceDecl::lookupPrivateMethod(
 
   // Look through local category implementations associated with the class.
   if (!Method)
-    Method = Instance ? getCategoryInstanceMethod(Sel)
-                      : getCategoryClassMethod(Sel);
+    Method = getCategoryMethod(Sel, Instance);
 
   // Before we give up, check if the selector is an instance method.
   // But only in the root.
This matches gcc's behavior and what the @@ -1101,7 +1099,7 @@ ObjCMethodDecl::findPropertyDecl(bool CheckOverrides) const { if (NumArgs > 1) return nullptr; - if (!isInstanceMethod() || getMethodFamily() != OMF_None) + if (!isInstanceMethod()) return nullptr; if (isPropertyAccessor()) { @@ -1822,6 +1820,11 @@ void ObjCImplementationDecl::setIvarInitializers(ASTContext &C, } } +ObjCImplementationDecl::init_const_iterator +ObjCImplementationDecl::init_begin() const { + return IvarInitializers.get(getASTContext().getExternalSource()); +} + raw_ostream &clang::operator<<(raw_ostream &OS, const ObjCImplementationDecl &ID) { OS << ID.getName(); diff --git a/contrib/llvm/tools/clang/lib/AST/DeclTemplate.cpp b/contrib/llvm/tools/clang/lib/AST/DeclTemplate.cpp index 0d1d2a4..6374a92 100644 --- a/contrib/llvm/tools/clang/lib/AST/DeclTemplate.cpp +++ b/contrib/llvm/tools/clang/lib/AST/DeclTemplate.cpp @@ -160,17 +160,43 @@ RedeclarableTemplateDecl::CommonBase *RedeclarableTemplateDecl::getCommonPtr() c return Common; } -template <class EntryType> -typename RedeclarableTemplateDecl::SpecEntryTraits<EntryType>::DeclType* +template<class EntryType> +typename RedeclarableTemplateDecl::SpecEntryTraits<EntryType>::DeclType * RedeclarableTemplateDecl::findSpecializationImpl( - llvm::FoldingSetVector<EntryType> &Specs, - ArrayRef<TemplateArgument> Args, - void *&InsertPos) { + llvm::FoldingSetVector<EntryType> &Specs, ArrayRef<TemplateArgument> Args, + void *&InsertPos) { typedef SpecEntryTraits<EntryType> SETraits; llvm::FoldingSetNodeID ID; EntryType::Profile(ID,Args, getASTContext()); EntryType *Entry = Specs.FindNodeOrInsertPos(ID, InsertPos); - return Entry ? SETraits::getMostRecentDecl(Entry) : nullptr; + return Entry ? SETraits::getDecl(Entry)->getMostRecentDecl() : nullptr; +} + +template<class Derived, class EntryType> +void RedeclarableTemplateDecl::addSpecializationImpl( + llvm::FoldingSetVector<EntryType> &Specializations, EntryType *Entry, + void *InsertPos) { + typedef SpecEntryTraits<EntryType> SETraits; + if (InsertPos) { +#ifndef NDEBUG + void *CorrectInsertPos; + assert(!findSpecializationImpl(Specializations, + SETraits::getTemplateArgs(Entry), + CorrectInsertPos) && + InsertPos == CorrectInsertPos && + "given incorrect InsertPos for specialization"); +#endif + Specializations.InsertNode(Entry, InsertPos); + } else { + EntryType *Existing = Specializations.GetOrInsertNode(Entry); + (void)Existing; + assert(SETraits::getDecl(Existing)->isCanonicalDecl() && + "non-canonical specialization?"); + } + + if (ASTMutationListener *L = getASTMutationListener()) + L->AddedCXXTemplateSpecialization(cast<Derived>(this), + SETraits::getDecl(Entry)); } /// \brief Generate the injected template arguments for the given template @@ -246,7 +272,11 @@ FunctionTemplateDecl::newCommon(ASTContext &C) const { } void FunctionTemplateDecl::LoadLazySpecializations() const { - Common *CommonPtr = getCommonPtr(); + // Grab the most recent declaration to ensure we've loaded any lazy + // redeclarations of this template. + // + // FIXME: Avoid walking the entire redeclaration chain here. 
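findSpecializationImpl above is the standard llvm::FoldingSet probe: profile the key into a FoldingSetNodeID, call FindNodeOrInsertPos, and keep InsertPos around for a later InsertNode; the new addSpecializationImpl then reuses that position (or falls back to GetOrInsertNode when none was computed). A hedged sketch of the protocol (Spec and its integer key are invented for illustration; the FoldingSet calls are the real ADT API):

    #include "llvm/ADT/FoldingSet.h"

    struct Spec : llvm::FoldingSetNode {
      int Key;
      explicit Spec(int K) : Key(K) {}
      void Profile(llvm::FoldingSetNodeID &ID) const { ID.AddInteger(Key); }
      static void Profile(llvm::FoldingSetNodeID &ID, int K) { ID.AddInteger(K); }
    };

    Spec *findOrCreate(llvm::FoldingSet<Spec> &Specs, int Key) {
      llvm::FoldingSetNodeID ID;
      Spec::Profile(ID, Key);           // hash/profile the lookup key
      void *InsertPos = nullptr;
      if (Spec *Existing = Specs.FindNodeOrInsertPos(ID, InsertPos))
        return Existing;                // hit: InsertPos goes unused
      Spec *New = new Spec(Key);        // miss: build the node...
      Specs.InsertNode(New, InsertPos); // ...and insert at the saved position
      return New;
    }

The NDEBUG-only block above re-runs the find purely to assert that a caller-supplied InsertPos is still valid.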
+ Common *CommonPtr = getMostRecentDecl()->getCommonPtr(); if (CommonPtr->LazySpecializations) { ASTContext &Context = getASTContext(); uint32_t *Specs = CommonPtr->LazySpecializations; @@ -270,12 +300,8 @@ FunctionTemplateDecl::findSpecialization(ArrayRef<TemplateArgument> Args, void FunctionTemplateDecl::addSpecialization( FunctionTemplateSpecializationInfo *Info, void *InsertPos) { - if (InsertPos) - getSpecializations().InsertNode(Info, InsertPos); - else - getSpecializations().GetOrInsertNode(Info); - if (ASTMutationListener *L = getASTMutationListener()) - L->AddedCXXTemplateSpecialization(this, Info->Function); + addSpecializationImpl<FunctionTemplateDecl>(getSpecializations(), Info, + InsertPos); } ArrayRef<TemplateArgument> FunctionTemplateDecl::getInjectedTemplateArgs() { @@ -320,7 +346,11 @@ ClassTemplateDecl *ClassTemplateDecl::CreateDeserialized(ASTContext &C, } void ClassTemplateDecl::LoadLazySpecializations() const { - Common *CommonPtr = getCommonPtr(); + // Grab the most recent declaration to ensure we've loaded any lazy + // redeclarations of this template. + // + // FIXME: Avoid walking the entire redeclaration chain here. + Common *CommonPtr = getMostRecentDecl()->getCommonPtr(); if (CommonPtr->LazySpecializations) { ASTContext &Context = getASTContext(); uint32_t *Specs = CommonPtr->LazySpecializations; @@ -357,16 +387,7 @@ ClassTemplateDecl::findSpecialization(ArrayRef<TemplateArgument> Args, void ClassTemplateDecl::AddSpecialization(ClassTemplateSpecializationDecl *D, void *InsertPos) { - if (InsertPos) - getSpecializations().InsertNode(D, InsertPos); - else { - ClassTemplateSpecializationDecl *Existing - = getSpecializations().GetOrInsertNode(D); - (void)Existing; - assert(Existing->isCanonicalDecl() && "Non-canonical specialization?"); - } - if (ASTMutationListener *L = getASTMutationListener()) - L->AddedCXXTemplateSpecialization(this, D); + addSpecializationImpl<ClassTemplateDecl>(getSpecializations(), D, InsertPos); } ClassTemplatePartialSpecializationDecl * @@ -953,7 +974,11 @@ VarTemplateDecl *VarTemplateDecl::CreateDeserialized(ASTContext &C, // TODO: Unify across class, function and variable templates? // May require moving this and Common to RedeclarableTemplateDecl. void VarTemplateDecl::LoadLazySpecializations() const { - Common *CommonPtr = getCommonPtr(); + // Grab the most recent declaration to ensure we've loaded any lazy + // redeclarations of this template. + // + // FIXME: Avoid walking the entire redeclaration chain here. 
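All three LoadLazySpecializations bodies in this file get the same fix: consult getMostRecentDecl() so lazily-deserialized redeclarations are pulled in before reading Common. The LazySpecializations array they then drain appears, from the surrounding code, to be a count-prefixed list of declaration IDs left behind by the external AST source; a sketch under that assumption (Materialize stands in for the external source's GetExternalDecl):

    #include <cstdint>

    using DeclID = uint32_t;

    void loadLazySpecializations(uint32_t *&LazySpecs,
                                 void (*Materialize)(DeclID)) {
      if (!LazySpecs)
        return;                  // nothing pending
      uint32_t *Specs = LazySpecs;
      LazySpecs = nullptr;       // clear first so the drain happens only once
      uint32_t Count = *Specs++; // assumed layout: element 0 is the count
      for (uint32_t I = 0; I != Count; ++I)
        Materialize(Specs[I]);   // deserialize one specialization by ID
    }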
+ Common *CommonPtr = getMostRecentDecl()->getCommonPtr(); if (CommonPtr->LazySpecializations) { ASTContext &Context = getASTContext(); uint32_t *Specs = CommonPtr->LazySpecializations; @@ -990,16 +1015,7 @@ VarTemplateDecl::findSpecialization(ArrayRef<TemplateArgument> Args, void VarTemplateDecl::AddSpecialization(VarTemplateSpecializationDecl *D, void *InsertPos) { - if (InsertPos) - getSpecializations().InsertNode(D, InsertPos); - else { - VarTemplateSpecializationDecl *Existing = - getSpecializations().GetOrInsertNode(D); - (void)Existing; - assert(Existing->isCanonicalDecl() && "Non-canonical specialization?"); - } - if (ASTMutationListener *L = getASTMutationListener()) - L->AddedCXXTemplateSpecialization(this, D); + addSpecializationImpl<VarTemplateDecl>(getSpecializations(), D, InsertPos); } VarTemplatePartialSpecializationDecl * diff --git a/contrib/llvm/tools/clang/lib/AST/Expr.cpp b/contrib/llvm/tools/clang/lib/AST/Expr.cpp index 712de50..76a4da2 100644 --- a/contrib/llvm/tools/clang/lib/AST/Expr.cpp +++ b/contrib/llvm/tools/clang/lib/AST/Expr.cpp @@ -1141,40 +1141,13 @@ CallExpr::CallExpr(const ASTContext& C, StmtClass SC, Expr *fn, RParenLoc = rparenloc; } -CallExpr::CallExpr(const ASTContext& C, Expr *fn, ArrayRef<Expr*> args, +CallExpr::CallExpr(const ASTContext &C, Expr *fn, ArrayRef<Expr *> args, QualType t, ExprValueKind VK, SourceLocation rparenloc) - : Expr(CallExprClass, t, VK, OK_Ordinary, - fn->isTypeDependent(), - fn->isValueDependent(), - fn->isInstantiationDependent(), - fn->containsUnexpandedParameterPack()), - NumArgs(args.size()) { - - SubExprs = new (C) Stmt*[args.size()+PREARGS_START]; - SubExprs[FN] = fn; - for (unsigned i = 0; i != args.size(); ++i) { - if (args[i]->isTypeDependent()) - ExprBits.TypeDependent = true; - if (args[i]->isValueDependent()) - ExprBits.ValueDependent = true; - if (args[i]->isInstantiationDependent()) - ExprBits.InstantiationDependent = true; - if (args[i]->containsUnexpandedParameterPack()) - ExprBits.ContainsUnexpandedParameterPack = true; - - SubExprs[i+PREARGS_START] = args[i]; - } - - CallExprBits.NumPreArgs = 0; - RParenLoc = rparenloc; + : CallExpr(C, CallExprClass, fn, /*NumPreArgs=*/0, args, t, VK, rparenloc) { } CallExpr::CallExpr(const ASTContext &C, StmtClass SC, EmptyShell Empty) - : Expr(SC, Empty), SubExprs(nullptr), NumArgs(0) { - // FIXME: Why do we allocate this? 
- SubExprs = new (C) Stmt*[PREARGS_START]; - CallExprBits.NumPreArgs = 0; -} + : CallExpr(C, SC, /*NumPreArgs=*/0, Empty) {} CallExpr::CallExpr(const ASTContext &C, StmtClass SC, unsigned NumPreArgs, EmptyShell Empty) @@ -1271,16 +1244,21 @@ bool CallExpr::isUnevaluatedBuiltinCall(ASTContext &Ctx) const { return false; } -QualType CallExpr::getCallReturnType() const { - QualType CalleeType = getCallee()->getType(); - if (const PointerType *FnTypePtr = CalleeType->getAs<PointerType>()) +QualType CallExpr::getCallReturnType(const ASTContext &Ctx) const { + const Expr *Callee = getCallee(); + QualType CalleeType = Callee->getType(); + if (const auto *FnTypePtr = CalleeType->getAs<PointerType>()) { CalleeType = FnTypePtr->getPointeeType(); - else if (const BlockPointerType *BPT = CalleeType->getAs<BlockPointerType>()) + } else if (const auto *BPT = CalleeType->getAs<BlockPointerType>()) { CalleeType = BPT->getPointeeType(); - else if (CalleeType->isSpecificPlaceholderType(BuiltinType::BoundMember)) + } else if (CalleeType->isSpecificPlaceholderType(BuiltinType::BoundMember)) { + if (isa<CXXPseudoDestructorExpr>(Callee->IgnoreParens())) + return Ctx.VoidTy; + // This should never be overloaded and so should never return null. - CalleeType = Expr::findBoundMemberType(getCallee()); - + CalleeType = Expr::findBoundMemberType(Callee); + } + const FunctionType *FnType = CalleeType->castAs<FunctionType>(); return FnType->getReturnType(); } @@ -1360,16 +1338,50 @@ IdentifierInfo *OffsetOfExpr::OffsetOfNode::getFieldName() const { return reinterpret_cast<IdentifierInfo *> (Data & ~(uintptr_t)Mask); } -MemberExpr *MemberExpr::Create(const ASTContext &C, Expr *base, bool isarrow, - NestedNameSpecifierLoc QualifierLoc, - SourceLocation TemplateKWLoc, - ValueDecl *memberdecl, - DeclAccessPair founddecl, - DeclarationNameInfo nameinfo, - const TemplateArgumentListInfo *targs, - QualType ty, - ExprValueKind vk, - ExprObjectKind ok) { +UnaryExprOrTypeTraitExpr::UnaryExprOrTypeTraitExpr( + UnaryExprOrTypeTrait ExprKind, Expr *E, QualType resultType, + SourceLocation op, SourceLocation rp) + : Expr(UnaryExprOrTypeTraitExprClass, resultType, VK_RValue, OK_Ordinary, + false, // Never type-dependent (C++ [temp.dep.expr]p3). + // Value-dependent if the argument is type-dependent. + E->isTypeDependent(), E->isInstantiationDependent(), + E->containsUnexpandedParameterPack()), + OpLoc(op), RParenLoc(rp) { + UnaryExprOrTypeTraitExprBits.Kind = ExprKind; + UnaryExprOrTypeTraitExprBits.IsType = false; + Argument.Ex = E; + + // Check to see if we are in the situation where alignof(decl) should be + // dependent because decl's alignment is dependent. 
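Both CallExpr constructors above lose their hand-rolled bodies in favor of C++11 delegating constructors, so the argument storage and dependence-bit bookkeeping live in exactly one place (the NumPreArgs overload). The shape of that refactor on a toy type (names invented):

    #include <utility>
    #include <vector>

    struct CallLike {
      // Convenience overload: previously duplicated the general logic,
      // now simply delegates with a defaulted NumPreArgs.
      CallLike(int Fn, std::vector<int> Args)
          : CallLike(Fn, /*NumPreArgs=*/0, std::move(Args)) {}

      // The one constructor that actually initializes members.
      CallLike(int Fn, unsigned NumPreArgs, std::vector<int> Args)
          : Fn(Fn), NumPreArgs(NumPreArgs), Args(std::move(Args)) {}

      int Fn;
      unsigned NumPreArgs;
      std::vector<int> Args;
    };

This also retires the deleted FIXME: the empty-shell form now reuses the NumPreArgs overload instead of repeating its allocation.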
+ if (ExprKind == UETT_AlignOf) { + if (!isValueDependent() || !isInstantiationDependent()) { + E = E->IgnoreParens(); + + const ValueDecl *D = nullptr; + if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) + D = DRE->getDecl(); + else if (const auto *ME = dyn_cast<MemberExpr>(E)) + D = ME->getMemberDecl(); + + if (D) { + for (const auto *I : D->specific_attrs<AlignedAttr>()) { + if (I->isAlignmentDependent()) { + setValueDependent(true); + setInstantiationDependent(true); + break; + } + } + } + } + } +} + +MemberExpr *MemberExpr::Create( + const ASTContext &C, Expr *base, bool isarrow, SourceLocation OperatorLoc, + NestedNameSpecifierLoc QualifierLoc, SourceLocation TemplateKWLoc, + ValueDecl *memberdecl, DeclAccessPair founddecl, + DeclarationNameInfo nameinfo, const TemplateArgumentListInfo *targs, + QualType ty, ExprValueKind vk, ExprObjectKind ok) { std::size_t Size = sizeof(MemberExpr); bool hasQualOrFound = (QualifierLoc || @@ -1384,8 +1396,8 @@ MemberExpr *MemberExpr::Create(const ASTContext &C, Expr *base, bool isarrow, Size += ASTTemplateKWAndArgsInfo::sizeFor(0); void *Mem = C.Allocate(Size, llvm::alignOf<MemberExpr>()); - MemberExpr *E = new (Mem) MemberExpr(base, isarrow, memberdecl, nameinfo, - ty, vk, ok); + MemberExpr *E = new (Mem) + MemberExpr(base, isarrow, OperatorLoc, memberdecl, nameinfo, ty, vk, ok); if (hasQualOrFound) { // FIXME: Wrong. We should be looking at the member declaration we found. @@ -2132,8 +2144,8 @@ bool Expr::isUnusedResultAWarning(const Expr *&WarnE, SourceLocation &Loc, case OO_Greater: case OO_GreaterEqual: case OO_LessEqual: - if (Op->getCallReturnType()->isReferenceType() || - Op->getCallReturnType()->isVoidType()) + if (Op->getCallReturnType(Ctx)->isReferenceType() || + Op->getCallReturnType(Ctx)->isVoidType()) break; WarnE = this; Loc = Op->getOperatorLoc(); @@ -2149,12 +2161,16 @@ bool Expr::isUnusedResultAWarning(const Expr *&WarnE, SourceLocation &Loc, // If this is a direct call, get the callee. const CallExpr *CE = cast<CallExpr>(this); if (const Decl *FD = CE->getCalleeDecl()) { + const FunctionDecl *Func = dyn_cast<FunctionDecl>(FD); + bool HasWarnUnusedResultAttr = Func ? Func->hasUnusedResultAttr() + : FD->hasAttr<WarnUnusedResultAttr>(); + // If the callee has attribute pure, const, or warn_unused_result, warn // about it. void foo() { strlen("bar"); } should warn. // // Note: If new cases are added here, DiagnoseUnusedExprResult should be // updated to match for QoI. 
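The unused-result hunk in progress here switches FunctionDecl callees over to the hasUnusedResultAttr() helper rather than a direct hasAttr<WarnUnusedResultAttr>() test, centralizing where the attribute is looked up; per the comment above, pure and const callees feed the same diagnostic. Hedged user-level code exercising these paths (the attribute spellings are the standard GCC/clang extensions):

    __attribute__((warn_unused_result)) int must_check();
    __attribute__((pure)) int table_lookup(int); // result-only, may read memory
    __attribute__((const)) int square(int);      // result-only, args only

    void caller() {
      must_check();    // warns: result explicitly marked must-use
      table_lookup(1); // warns: a pure call only matters for its result
      square(2);       // warns: likewise for const
    }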
- if (FD->hasAttr<WarnUnusedResultAttr>() || + if (HasWarnUnusedResultAttr || FD->hasAttr<PureAttr>() || FD->hasAttr<ConstAttr>()) { WarnE = this; Loc = CE->getCallee()->getLocStart(); @@ -2200,9 +2216,7 @@ bool Expr::isUnusedResultAWarning(const Expr *&WarnE, SourceLocation &Loc, } if (const ObjCMethodDecl *MD = ME->getMethodDecl()) - if (MD->hasAttr<WarnUnusedResultAttr>() || - (MD->isPropertyAccessor() && !MD->getReturnType()->isVoidType() && - !ME->getReceiverType()->isObjCIdType())) { + if (MD->hasAttr<WarnUnusedResultAttr>()) { WarnE = this; Loc = getExprLoc(); return true; @@ -2387,7 +2401,7 @@ QualType Expr::findBoundMemberType(const Expr *expr) { return type; } - assert(isa<UnresolvedMemberExpr>(expr)); + assert(isa<UnresolvedMemberExpr>(expr) || isa<CXXPseudoDestructorExpr>(expr)); return QualType(); } @@ -2932,11 +2946,19 @@ bool Expr::HasSideEffects(const ASTContext &Ctx, case CXXOperatorCallExprClass: case CXXMemberCallExprClass: case CUDAKernelCallExprClass: + case UserDefinedLiteralClass: { + // We don't know a call definitely has side effects, except for calls + // to pure/const functions that definitely don't. + // If the call itself is considered side-effect free, check the operands. + const Decl *FD = cast<CallExpr>(this)->getCalleeDecl(); + bool IsPure = FD && (FD->hasAttr<ConstAttr>() || FD->hasAttr<PureAttr>()); + if (IsPure || !IncludePossibleEffects) + break; + return true; + } + case BlockExprClass: case CXXBindTemporaryExprClass: - case UserDefinedLiteralClass: - // We don't know a call definitely has side effects, but we can check the - // call's operands. if (!IncludePossibleEffects) break; return true; diff --git a/contrib/llvm/tools/clang/lib/AST/ExprCXX.cpp b/contrib/llvm/tools/clang/lib/AST/ExprCXX.cpp index 9336166..d6f2ce6 100644 --- a/contrib/llvm/tools/clang/lib/AST/ExprCXX.cpp +++ b/contrib/llvm/tools/clang/lib/AST/ExprCXX.cpp @@ -208,8 +208,9 @@ void CXXNewExpr::AllocateArgsArray(const ASTContext &C, bool isArray, } bool CXXNewExpr::shouldNullCheckAllocation(const ASTContext &Ctx) const { - return getOperatorNew()->getType()-> - castAs<FunctionProtoType>()->isNothrow(Ctx); + return getOperatorNew()->getType()->castAs<FunctionProtoType>()->isNothrow( + Ctx) && + !getOperatorNew()->isReservedGlobalPlacementOperator(); } // CXXDeleteExpr @@ -237,10 +238,7 @@ CXXPseudoDestructorExpr::CXXPseudoDestructorExpr(const ASTContext &Context, SourceLocation ColonColonLoc, SourceLocation TildeLoc, PseudoDestructorTypeStorage DestroyedType) : Expr(CXXPseudoDestructorExprClass, - Context.getPointerType(Context.getFunctionType( - Context.VoidTy, None, - FunctionProtoType::ExtProtoInfo( - Context.getDefaultCallingConvention(false, true)))), + Context.BoundMemberTy, VK_RValue, OK_Ordinary, /*isTypeDependent=*/(Base->isTypeDependent() || (DestroyedType.getTypeSourceInfo() && @@ -359,8 +357,7 @@ OverloadExpr::OverloadExpr(StmtClass K, const ASTContext &C, Results = static_cast<DeclAccessPair *>( C.Allocate(sizeof(DeclAccessPair) * NumResults, llvm::alignOf<DeclAccessPair>())); - memcpy(Results, &*Begin.getIterator(), - NumResults * sizeof(DeclAccessPair)); + memcpy(Results, Begin.I, NumResults * sizeof(DeclAccessPair)); } // If we have explicit template arguments, check for dependent @@ -401,8 +398,7 @@ void OverloadExpr::initializeResults(const ASTContext &C, C.Allocate(sizeof(DeclAccessPair) * NumResults, llvm::alignOf<DeclAccessPair>())); - memcpy(Results, &*Begin.getIterator(), - NumResults * sizeof(DeclAccessPair)); + memcpy(Results, Begin.I, NumResults * 
sizeof(DeclAccessPair)); } } @@ -1031,6 +1027,11 @@ LambdaExpr *LambdaExpr::CreateDeserialized(const ASTContext &C, return new (Mem) LambdaExpr(EmptyShell(), NumCaptures, NumArrayIndexVars > 0); } +bool LambdaExpr::isInitCapture(const LambdaCapture *C) const { + return (C->capturesVariable() && C->getCapturedVar()->isInitCapture() && + (getCallOperator() == C->getCapturedVar()->getDeclContext())); +} + LambdaExpr::capture_iterator LambdaExpr::capture_begin() const { return getLambdaClass()->getLambdaData().Captures; } diff --git a/contrib/llvm/tools/clang/lib/AST/ExprClassification.cpp b/contrib/llvm/tools/clang/lib/AST/ExprClassification.cpp index 933ea97..5b320c2 100644 --- a/contrib/llvm/tools/clang/lib/AST/ExprClassification.cpp +++ b/contrib/llvm/tools/clang/lib/AST/ExprClassification.cpp @@ -283,7 +283,7 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) { case Expr::CXXMemberCallExprClass: case Expr::UserDefinedLiteralClass: case Expr::CUDAKernelCallExprClass: - return ClassifyUnnamed(Ctx, cast<CallExpr>(E)->getCallReturnType()); + return ClassifyUnnamed(Ctx, cast<CallExpr>(E)->getCallReturnType(Ctx)); // __builtin_choose_expr is equivalent to the chosen expression. case Expr::ChooseExprClass: @@ -418,9 +418,10 @@ static Cl::Kinds ClassifyDecl(ASTContext &Ctx, const Decl *D) { islvalue = NTTParm->getType()->isReferenceType(); else islvalue = isa<VarDecl>(D) || isa<FieldDecl>(D) || - isa<IndirectFieldDecl>(D) || - (Ctx.getLangOpts().CPlusPlus && - (isa<FunctionDecl>(D) || isa<FunctionTemplateDecl>(D))); + isa<IndirectFieldDecl>(D) || + (Ctx.getLangOpts().CPlusPlus && + (isa<FunctionDecl>(D) || isa<MSPropertyDecl>(D) || + isa<FunctionTemplateDecl>(D))); return islvalue ? Cl::CL_LValue : Cl::CL_PRValue; } @@ -605,7 +606,7 @@ static Cl::ModifiableType IsModifiable(ASTContext &Ctx, const Expr *E, if (CT.isConstQualified()) return Cl::CM_ConstQualified; if (CT.getQualifiers().getAddressSpace() == LangAS::opencl_constant) - return Cl::CM_ConstQualified; + return Cl::CM_ConstAddrSpace; // Arrays are not modifiable, only their elements are. if (CT->isArrayType()) @@ -671,6 +672,7 @@ Expr::isModifiableLvalue(ASTContext &Ctx, SourceLocation *Loc) const { llvm_unreachable("CM_LValueCast and CL_LValue don't match"); case Cl::CM_NoSetterProperty: return MLV_NoSetterProperty; case Cl::CM_ConstQualified: return MLV_ConstQualified; + case Cl::CM_ConstAddrSpace: return MLV_ConstAddrSpace; case Cl::CM_ArrayType: return MLV_ArrayType; case Cl::CM_IncompleteType: return MLV_IncompleteType; } diff --git a/contrib/llvm/tools/clang/lib/AST/ExprConstant.cpp b/contrib/llvm/tools/clang/lib/AST/ExprConstant.cpp index 3d7f2dc..d1ec7ae 100644 --- a/contrib/llvm/tools/clang/lib/AST/ExprConstant.cpp +++ b/contrib/llvm/tools/clang/lib/AST/ExprConstant.cpp @@ -1406,7 +1406,7 @@ static bool CheckConstantExpression(EvalInfo &Info, SourceLocation DiagLoc, return true; } -const ValueDecl *GetLValueBaseDecl(const LValue &LVal) { +static const ValueDecl *GetLValueBaseDecl(const LValue &LVal) { return LVal.Base.dyn_cast<const ValueDecl*>(); } @@ -2173,7 +2173,7 @@ struct CompleteObject { assert(Value && "missing value for complete object"); } - LLVM_EXPLICIT operator bool() const { return Value; } + explicit operator bool() const { return Value; } }; /// Find the designated sub-object of an rvalue. @@ -2502,8 +2502,9 @@ static bool AreElementsOfSameArray(QualType ObjType, } /// Find the complete object to which an LValue refers. 
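LambdaExpr::isInitCapture above pins down C++14 init-captures: the captured variable must itself be an init-capture and must have been introduced by this lambda's call operator (its DeclContext), which distinguishes it from an ordinary capture of an enclosing variable. For reference:

    #include <cstdio>

    auto makeCounter() {
      int step = 2;
      // 'n' is an init-capture (it lives only in the closure); 'step' is an
      // ordinary copy capture of an enclosing variable.
      return [n = 0, step]() mutable { return n += step; };
    }

    int main() {
      auto c = makeCounter();
      std::printf("%d %d\n", c(), c()); // prints: 2 4
    }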
-CompleteObject findCompleteObject(EvalInfo &Info, const Expr *E, AccessKinds AK, - const LValue &LVal, QualType LValType) { +static CompleteObject findCompleteObject(EvalInfo &Info, const Expr *E, + AccessKinds AK, const LValue &LVal, + QualType LValType) { if (!LVal.Base) { Info.Diag(E, diag::note_constexpr_access_null) << AK; return CompleteObject(); } @@ -3726,8 +3727,9 @@ static bool HandleFunctionCall(SourceLocation CallLoc, // Skip this for non-union classes with no fields; in that case, the defaulted // copy/move does not actually read the object. const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Callee); - if (MD && MD->isDefaulted() && MD->isTrivial() && - (MD->getParent()->isUnion() || hasFields(MD->getParent()))) { + if (MD && MD->isDefaulted() && + (MD->getParent()->isUnion() || + (MD->isTrivial() && hasFields(MD->getParent())))) { assert(This && (MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator())); LValue RHS; @@ -3791,11 +3793,9 @@ static bool HandleConstructorCall(SourceLocation CallLoc, const LValue &This, // Skip this for empty non-union classes; we should not perform an // lvalue-to-rvalue conversion on them because their copy constructor does not // actually read them. - if (Definition->isDefaulted() && - ((Definition->isCopyConstructor() && Definition->isTrivial()) || - (Definition->isMoveConstructor() && Definition->isTrivial())) && + if (Definition->isDefaulted() && Definition->isCopyOrMoveConstructor() && (Definition->getParent()->isUnion() || - hasFields(Definition->getParent()))) { + (Definition->isTrivial() && hasFields(Definition->getParent())))) { LValue RHS; RHS.setFrom(Info.Ctx, ArgValues[0]); return handleLValueToRValueConversion(Info, Args[0], Args[0]->getType(), @@ -6834,7 +6834,7 @@ void DataRecursiveIntBinOpEvaluator::process(EvalResult &Result) { } bool IntExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) { - if (E->isAssignmentOp()) + if (!Info.keepEvaluatingAfterFailure() && E->isAssignmentOp()) return Error(E); if (DataRecursiveIntBinOpEvaluator::shouldEnqueue(E)) @@ -6846,7 +6846,11 @@ bool IntExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) { if (LHSTy->isAnyComplexType() || RHSTy->isAnyComplexType()) { ComplexValue LHS, RHS; bool LHSOK; - if (E->getLHS()->getType()->isRealFloatingType()) { + if (E->isAssignmentOp()) { + LValue LV; + EvaluateLValue(E->getLHS(), LV, Info); + LHSOK = false; + } else if (LHSTy->isRealFloatingType()) { LHSOK = EvaluateFloat(E->getLHS(), LHS.FloatReal, Info); if (LHSOK) { LHS.makeComplexFloat(); @@ -7586,10 +7590,23 @@ static bool TryEvaluateBuiltinNaN(const ASTContext &Context, else if (S->getString().getAsInteger(0, fill)) return false; - if (SNaN) - Result = llvm::APFloat::getSNaN(Sem, false, &fill); - else - Result = llvm::APFloat::getQNaN(Sem, false, &fill); + if (Context.getTargetInfo().isNan2008()) { + if (SNaN) + Result = llvm::APFloat::getSNaN(Sem, false, &fill); + else + Result = llvm::APFloat::getQNaN(Sem, false, &fill); + } else { + // Prior to IEEE 754-2008, architectures were allowed to choose whether + // the first bit of their significand was set for qNaN or sNaN. MIPS chose + // a different encoding to what became a standard in 2008, and for pre- + // 2008 revisions, MIPS interpreted sNaN-2008 as qNaN and qNaN-2008 as + // sNaN. This is now known as "legacy NaN" encoding.
+ if (SNaN) + Result = llvm::APFloat::getQNaN(Sem, false, &fill); + else + Result = llvm::APFloat::getSNaN(Sem, false, &fill); + } + return true; } diff --git a/contrib/llvm/tools/clang/lib/AST/ExternalASTSource.cpp b/contrib/llvm/tools/clang/lib/AST/ExternalASTSource.cpp index 8894107..730842a 100644 --- a/contrib/llvm/tools/clang/lib/AST/ExternalASTSource.cpp +++ b/contrib/llvm/tools/clang/lib/AST/ExternalASTSource.cpp @@ -66,6 +66,11 @@ Stmt *ExternalASTSource::GetExternalDeclStmt(uint64_t Offset) { return nullptr; } +CXXCtorInitializer ** +ExternalASTSource::GetExternalCXXCtorInitializers(uint64_t Offset) { + return nullptr; +} + CXXBaseSpecifier * ExternalASTSource::GetExternalCXXBaseSpecifiers(uint64_t Offset) { return nullptr; diff --git a/contrib/llvm/tools/clang/lib/AST/InheritViz.cpp b/contrib/llvm/tools/clang/lib/AST/InheritViz.cpp index eb3020c..0b82da1 100644 --- a/contrib/llvm/tools/clang/lib/AST/InheritViz.cpp +++ b/contrib/llvm/tools/clang/lib/AST/InheritViz.cpp @@ -22,11 +22,9 @@ #include "llvm/Support/raw_ostream.h" #include <map> #include <set> +using namespace clang; -using namespace llvm; - -namespace clang { - +namespace { /// InheritanceHierarchyWriter - Helper class that writes out a /// GraphViz file that diagrams the inheritance hierarchy starting at /// a given C++ class type. Note that we do not use LLVM's @@ -44,7 +42,8 @@ public: : Context(Context), Out(Out) { } void WriteGraph(QualType Type) { - Out << "digraph \"" << DOT::EscapeString(Type.getAsString()) << "\" {\n"; + Out << "digraph \"" << llvm::DOT::EscapeString(Type.getAsString()) + << "\" {\n"; WriteNode(Type, false); Out << "}\n"; } @@ -59,6 +58,7 @@ protected: /// (only) virtual base. raw_ostream& WriteNodeReference(QualType Type, bool FromVirtual); }; +} // namespace void InheritanceHierarchyWriter::WriteNode(QualType Type, bool FromVirtual) { QualType CanonType = Context.getCanonicalType(Type); @@ -78,7 +78,7 @@ void InheritanceHierarchyWriter::WriteNode(QualType Type, bool FromVirtual) { // Give the node a label based on the name of the class. 
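Returning to the TryEvaluateBuiltinNaN hunk above: these are the user-level spellings that reach it during constant evaluation, and on pre-2008 ("legacy NaN") MIPS the quiet and signaling encodings trade places, which is exactly what the new else branch implements:

    float quiet = __builtin_nanf("");       // quiet NaN, empty payload
    float signaling = __builtin_nansf("");  // signaling NaN
    double payload = __builtin_nan("0x7");  // payload parsed from the string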
std::string TypeName = Type.getAsString(); - Out << " [ shape=\"box\", label=\"" << DOT::EscapeString(TypeName); + Out << " [ shape=\"box\", label=\"" << llvm::DOT::EscapeString(TypeName); // If the name of the class was a typedef or something different // from the "real" class name, show the real class name in @@ -139,9 +139,8 @@ void CXXRecordDecl::viewInheritance(ASTContext& Context) const { int FD; SmallString<128> Filename; - std::error_code EC = - sys::fs::createTemporaryFile(Self.getAsString(), "dot", FD, Filename); - if (EC) { + if (std::error_code EC = llvm::sys::fs::createTemporaryFile( + Self.getAsString(), "dot", FD, Filename)) { llvm::errs() << "Error: " << EC.message() << "\n"; return; } @@ -159,5 +158,3 @@ void CXXRecordDecl::viewInheritance(ASTContext& Context) const { // Display the graph DisplayGraph(Filename); } - -} diff --git a/contrib/llvm/tools/clang/lib/AST/ItaniumCXXABI.cpp b/contrib/llvm/tools/clang/lib/AST/ItaniumCXXABI.cpp index 378121c..7503cbf 100644 --- a/contrib/llvm/tools/clang/lib/AST/ItaniumCXXABI.cpp +++ b/contrib/llvm/tools/clang/lib/AST/ItaniumCXXABI.cpp @@ -106,7 +106,7 @@ public: TargetInfo::IntType PtrDiff = Target.getPtrDiffType(0); uint64_t Width = Target.getTypeWidth(PtrDiff); unsigned Align = Target.getTypeAlign(PtrDiff); - if (MPT->getPointeeType()->isFunctionType()) + if (MPT->isMemberFunctionPointer()) Width = 2 * Width; return std::make_pair(Width, Align); } @@ -133,6 +133,22 @@ public: return Layout.getNonVirtualSize() == PointerSize; } + const CXXConstructorDecl * + getCopyConstructorForExceptionObject(CXXRecordDecl *RD) override { + return nullptr; + } + + void addCopyConstructorForExceptionObject(CXXRecordDecl *RD, + CXXConstructorDecl *CD) override {} + + void addDefaultArgExprForConstructor(const CXXConstructorDecl *CD, + unsigned ParmIdx, Expr *DAE) override {} + + Expr *getDefaultArgExprForConstructor(const CXXConstructorDecl *CD, + unsigned ParmIdx) override { + return nullptr; + } + MangleNumberingContext *createMangleNumberingContext() const override { return new ItaniumNumberingContext(); } diff --git a/contrib/llvm/tools/clang/lib/AST/ItaniumMangle.cpp b/contrib/llvm/tools/clang/lib/AST/ItaniumMangle.cpp index 156ad64..d07efae 100644 --- a/contrib/llvm/tools/clang/lib/AST/ItaniumMangle.cpp +++ b/contrib/llvm/tools/clang/lib/AST/ItaniumMangle.cpp @@ -42,8 +42,8 @@ using namespace clang; namespace { -/// \brief Retrieve the declaration context that should be used when mangling -/// the given declaration. +/// Retrieve the declaration context that should be used when mangling the given +/// declaration. static const DeclContext *getEffectiveDeclContext(const Decl *D) { // The ABI assumes that lambda closure types that occur within // default arguments live in the context of the function. 
However, due to @@ -69,6 +69,14 @@ static const DeclContext *getEffectiveDeclContext(const Decl *D) { if (const CapturedDecl *CD = dyn_cast<CapturedDecl>(DC)) return getEffectiveDeclContext(CD); + if (const auto *VD = dyn_cast<VarDecl>(D)) + if (VD->isExternC()) + return VD->getASTContext().getTranslationUnitDecl(); + + if (const auto *FD = dyn_cast<FunctionDecl>(D)) + if (FD->isExternC()) + return FD->getASTContext().getTranslationUnitDecl(); + return DC; } @@ -156,12 +164,18 @@ public: void mangleDynamicInitializer(const VarDecl *D, raw_ostream &Out) override; void mangleDynamicAtExitDestructor(const VarDecl *D, raw_ostream &Out) override; + void mangleSEHFilterExpression(const NamedDecl *EnclosingDecl, + raw_ostream &Out) override; + void mangleSEHFinallyBlock(const NamedDecl *EnclosingDecl, + raw_ostream &Out) override; void mangleItaniumThreadLocalInit(const VarDecl *D, raw_ostream &) override; void mangleItaniumThreadLocalWrapper(const VarDecl *D, raw_ostream &) override; void mangleStringLiteral(const StringLiteral *, raw_ostream &) override; + void mangleCXXVTableBitSet(const CXXRecordDecl *RD, raw_ostream &) override; + bool getNextDiscriminator(const NamedDecl *ND, unsigned &disc) { // Lambda closure types are already numbered. if (isLambda(ND)) @@ -196,7 +210,7 @@ public: /// @} }; -/// CXXNameMangler - Manage the mangling of a single name. +/// Manage the mangling of a single name. class CXXNameMangler { ItaniumMangleContextImpl &Context; raw_ostream &Out; @@ -207,7 +221,7 @@ class CXXNameMangler { const NamedDecl *Structor; unsigned StructorType; - /// SeqID - The next subsitution sequence number. + /// The next substitution sequence number. unsigned SeqID; class FunctionTypeDepthState { @@ -284,7 +298,7 @@ public: #endif raw_ostream &getStream() { return Out; } - void mangle(const NamedDecl *D, StringRef Prefix = "_Z"); + void mangle(const NamedDecl *D); void mangleCallOffset(int64_t NonVirtual, int64_t Virtual); void mangleNumber(const llvm::APSInt &I); void mangleNumber(int64_t Number); @@ -317,10 +331,8 @@ private: void addSubstitution(uintptr_t Ptr); void mangleUnresolvedPrefix(NestedNameSpecifier *qualifier, - NamedDecl *firstQualifierLookup, bool recursive = false); void mangleUnresolvedName(NestedNameSpecifier *qualifier, - NamedDecl *firstQualifierLookup, DeclarationName name, unsigned KnownArity = UnknownArity); @@ -350,6 +362,9 @@ private: void manglePrefix(QualType type); void mangleTemplatePrefix(const TemplateDecl *ND, bool NoFunction=false); void mangleTemplatePrefix(TemplateName Template); + bool mangleUnresolvedTypeOrSimpleId(QualType DestroyedType, + StringRef Prefix = ""); + void mangleOperatorName(DeclarationName Name, unsigned Arity); void mangleOperatorName(OverloadedOperatorKind OO, unsigned Arity); void mangleQualifiers(Qualifiers Quals); void mangleRefQualifier(RefQualifierKind RefQualifier); @@ -370,12 +385,14 @@ private: void mangleAArch64NeonVectorType(const VectorType *T); void mangleIntegerLiteral(QualType T, const llvm::APSInt &Value); + void mangleMemberExprBase(const Expr *base, bool isArrow); void mangleMemberExpr(const Expr *base, bool isArrow, NestedNameSpecifier *qualifier, NamedDecl *firstQualifierLookup, DeclarationName name, unsigned knownArity); void mangleCastExpression(const Expr *E, StringRef CastEncoding); + void mangleInitListElements(const InitListExpr *InitList); void mangleExpression(const Expr *E, unsigned Arity = UnknownArity); void mangleCXXCtorType(CXXCtorType T); void mangleCXXDtorType(CXXDtorType T); @@ -439,11 +456,11 @@ 
bool ItaniumMangleContextImpl::shouldMangleCXXName(const NamedDecl *D) { return true; } -void CXXNameMangler::mangle(const NamedDecl *D, StringRef Prefix) { +void CXXNameMangler::mangle(const NamedDecl *D) { // <mangled-name> ::= _Z <encoding> // ::= <data name> // ::= <special-name> - Out << Prefix; + Out << "_Z"; if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) mangleFunctionEncoding(FD); else if (const VarDecl *VD = dyn_cast<VarDecl>(D)) @@ -519,7 +536,7 @@ static const DeclContext *IgnoreLinkageSpecDecls(const DeclContext *DC) { return DC; } -/// isStd - Return whether a given namespace is the 'std' namespace. +/// Return whether a given namespace is the 'std' namespace. static bool isStd(const NamespaceDecl *NS) { if (!IgnoreLinkageSpecDecls(getEffectiveParentContext(NS)) ->isTranslationUnit()) @@ -748,8 +765,7 @@ void CXXNameMangler::mangleCallOffset(int64_t NonVirtual, int64_t Virtual) { } void CXXNameMangler::manglePrefix(QualType type) { - if (const TemplateSpecializationType *TST = - type->getAs<TemplateSpecializationType>()) { + if (const auto *TST = type->getAs<TemplateSpecializationType>()) { if (!mangleSubstitution(QualType(TST, 0))) { mangleTemplatePrefix(TST->getTemplateName()); @@ -759,17 +775,19 @@ void CXXNameMangler::manglePrefix(QualType type) { mangleTemplateArgs(TST->getArgs(), TST->getNumArgs()); addSubstitution(QualType(TST, 0)); } - } else if (const DependentTemplateSpecializationType *DTST - = type->getAs<DependentTemplateSpecializationType>()) { - TemplateName Template - = getASTContext().getDependentTemplateName(DTST->getQualifier(), - DTST->getIdentifier()); - mangleTemplatePrefix(Template); + } else if (const auto *DTST = + type->getAs<DependentTemplateSpecializationType>()) { + if (!mangleSubstitution(QualType(DTST, 0))) { + TemplateName Template = getASTContext().getDependentTemplateName( + DTST->getQualifier(), DTST->getIdentifier()); + mangleTemplatePrefix(Template); - // FIXME: GCC does not appear to mangle the template arguments when - // the template in question is a dependent template name. Should we - // emulate that badness? - mangleTemplateArgs(DTST->getArgs(), DTST->getNumArgs()); + // FIXME: GCC does not appear to mangle the template arguments when + // the template in question is a dependent template name. Should we + // emulate that badness? + mangleTemplateArgs(DTST->getArgs(), DTST->getNumArgs()); + addSubstitution(QualType(DTST, 0)); + } } else { // We use the QualType mangle type variant here because it handles // substitutions. @@ -779,12 +797,9 @@ void CXXNameMangler::manglePrefix(QualType type) { /// Mangle everything prior to the base-unresolved-name in an unresolved-name. /// -/// \param firstQualifierLookup - the entity found by unqualified lookup -/// for the first name in the qualifier, if this is for a member expression /// \param recursive - true if this is being called recursively, /// i.e. if there is more prefix "to the right". 
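With the Prefix parameter gone, mangle() above always writes the Itanium "_Z" marker itself before dispatching on the declaration kind. Reference manglings for orientation (verifiable with c++filt):

    void foo(int);                  // mangles to: _Z3fooi
    namespace ns { void foo(int); } // mangles to: _ZN2ns3fooEi (N...E nesting)
    // "_Z" <encoding>; source names are length-prefixed, 'i' encodes int.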
void CXXNameMangler::mangleUnresolvedPrefix(NestedNameSpecifier *qualifier, - NamedDecl *firstQualifierLookup, bool recursive) { // x, ::x @@ -817,7 +832,7 @@ void CXXNameMangler::mangleUnresolvedPrefix(NestedNameSpecifier *qualifier, case NestedNameSpecifier::Namespace: if (qualifier->getPrefix()) - mangleUnresolvedPrefix(qualifier->getPrefix(), firstQualifierLookup, + mangleUnresolvedPrefix(qualifier->getPrefix(), /*recursive*/ true); else Out << "sr"; @@ -825,7 +840,7 @@ void CXXNameMangler::mangleUnresolvedPrefix(NestedNameSpecifier *qualifier, break; case NestedNameSpecifier::NamespaceAlias: if (qualifier->getPrefix()) - mangleUnresolvedPrefix(qualifier->getPrefix(), firstQualifierLookup, + mangleUnresolvedPrefix(qualifier->getPrefix(), /*recursive*/ true); else Out << "sr"; @@ -842,193 +857,26 @@ void CXXNameMangler::mangleUnresolvedPrefix(NestedNameSpecifier *qualifier, // - a template template parameter with arguments // In all of these cases, we should have no prefix. if (qualifier->getPrefix()) { - mangleUnresolvedPrefix(qualifier->getPrefix(), firstQualifierLookup, + mangleUnresolvedPrefix(qualifier->getPrefix(), /*recursive*/ true); } else { // Otherwise, all the cases want this. Out << "sr"; } - // Only certain other types are valid as prefixes; enumerate them. - switch (type->getTypeClass()) { - case Type::Builtin: - case Type::Complex: - case Type::Adjusted: - case Type::Decayed: - case Type::Pointer: - case Type::BlockPointer: - case Type::LValueReference: - case Type::RValueReference: - case Type::MemberPointer: - case Type::ConstantArray: - case Type::IncompleteArray: - case Type::VariableArray: - case Type::DependentSizedArray: - case Type::DependentSizedExtVector: - case Type::Vector: - case Type::ExtVector: - case Type::FunctionProto: - case Type::FunctionNoProto: - case Type::Enum: - case Type::Paren: - case Type::Elaborated: - case Type::Attributed: - case Type::Auto: - case Type::PackExpansion: - case Type::ObjCObject: - case Type::ObjCInterface: - case Type::ObjCObjectPointer: - case Type::Atomic: - llvm_unreachable("type is illegal as a nested name specifier"); - - case Type::SubstTemplateTypeParmPack: - // FIXME: not clear how to mangle this! - // template <class T...> class A { - // template <class U...> void foo(decltype(T::foo(U())) x...); - // }; - Out << "_SUBSTPACK_"; - break; - - // <unresolved-type> ::= <template-param> - // ::= <decltype> - // ::= <template-template-param> <template-args> - // (this last is not official yet) - case Type::TypeOfExpr: - case Type::TypeOf: - case Type::Decltype: - case Type::TemplateTypeParm: - case Type::UnaryTransform: - case Type::SubstTemplateTypeParm: - unresolvedType: - assert(!qualifier->getPrefix()); - - // We only get here recursively if we're followed by identifiers. - if (recursive) Out << 'N'; - - // This seems to do everything we want. It's not really - // sanctioned for a substituted template parameter, though. - mangleType(QualType(type, 0)); - - // We never want to print 'E' directly after an unresolved-type, - // so we return directly. + if (mangleUnresolvedTypeOrSimpleId(QualType(type, 0), recursive ? 
"N" : "")) return; - case Type::Typedef: - mangleSourceName(cast<TypedefType>(type)->getDecl()->getIdentifier()); - break; - - case Type::UnresolvedUsing: - mangleSourceName(cast<UnresolvedUsingType>(type)->getDecl() - ->getIdentifier()); - break; - - case Type::Record: - mangleSourceName(cast<RecordType>(type)->getDecl()->getIdentifier()); - break; - - case Type::TemplateSpecialization: { - const TemplateSpecializationType *tst - = cast<TemplateSpecializationType>(type); - TemplateName name = tst->getTemplateName(); - switch (name.getKind()) { - case TemplateName::Template: - case TemplateName::QualifiedTemplate: { - TemplateDecl *temp = name.getAsTemplateDecl(); - - // If the base is a template template parameter, this is an - // unresolved type. - assert(temp && "no template for template specialization type"); - if (isa<TemplateTemplateParmDecl>(temp)) goto unresolvedType; - - mangleSourceName(temp->getIdentifier()); - break; - } - - case TemplateName::OverloadedTemplate: - case TemplateName::DependentTemplate: - llvm_unreachable("invalid base for a template specialization type"); - - case TemplateName::SubstTemplateTemplateParm: { - SubstTemplateTemplateParmStorage *subst - = name.getAsSubstTemplateTemplateParm(); - mangleExistingSubstitution(subst->getReplacement()); - break; - } - - case TemplateName::SubstTemplateTemplateParmPack: { - // FIXME: not clear how to mangle this! - // template <template <class U> class T...> class A { - // template <class U...> void foo(decltype(T<U>::foo) x...); - // }; - Out << "_SUBSTPACK_"; - break; - } - } - - mangleTemplateArgs(tst->getArgs(), tst->getNumArgs()); - break; - } - - case Type::InjectedClassName: - mangleSourceName(cast<InjectedClassNameType>(type)->getDecl() - ->getIdentifier()); - break; - - case Type::DependentName: - mangleSourceName(cast<DependentNameType>(type)->getIdentifier()); - break; - - case Type::DependentTemplateSpecialization: { - const DependentTemplateSpecializationType *tst - = cast<DependentTemplateSpecializationType>(type); - mangleSourceName(tst->getIdentifier()); - mangleTemplateArgs(tst->getArgs(), tst->getNumArgs()); - break; - } - } break; } case NestedNameSpecifier::Identifier: // Member expressions can have these without prefixes. - if (qualifier->getPrefix()) { - mangleUnresolvedPrefix(qualifier->getPrefix(), firstQualifierLookup, + if (qualifier->getPrefix()) + mangleUnresolvedPrefix(qualifier->getPrefix(), /*recursive*/ true); - } else if (firstQualifierLookup) { - - // Try to make a proper qualifier out of the lookup result, and - // then just recurse on that. - NestedNameSpecifier *newQualifier; - if (TypeDecl *typeDecl = dyn_cast<TypeDecl>(firstQualifierLookup)) { - QualType type = getASTContext().getTypeDeclType(typeDecl); - - // Pretend we had a different nested name specifier. - newQualifier = NestedNameSpecifier::Create(getASTContext(), - /*prefix*/ nullptr, - /*template*/ false, - type.getTypePtr()); - } else if (NamespaceDecl *nspace = - dyn_cast<NamespaceDecl>(firstQualifierLookup)) { - newQualifier = NestedNameSpecifier::Create(getASTContext(), - /*prefix*/ nullptr, - nspace); - } else if (NamespaceAliasDecl *alias = - dyn_cast<NamespaceAliasDecl>(firstQualifierLookup)) { - newQualifier = NestedNameSpecifier::Create(getASTContext(), - /*prefix*/ nullptr, - alias); - } else { - // No sensible mangling to do here. 
- newQualifier = nullptr; - } - - if (newQualifier) - return mangleUnresolvedPrefix(newQualifier, /*lookup*/ nullptr, - recursive); - - } else { + else Out << "sr"; - } mangleSourceName(qualifier->getAsIdentifier()); break; @@ -1043,16 +891,41 @@ void CXXNameMangler::mangleUnresolvedPrefix(NestedNameSpecifier *qualifier, /// Mangle an unresolved-name, which is generally used for names which /// weren't resolved to specific entities. void CXXNameMangler::mangleUnresolvedName(NestedNameSpecifier *qualifier, - NamedDecl *firstQualifierLookup, DeclarationName name, unsigned knownArity) { - if (qualifier) mangleUnresolvedPrefix(qualifier, firstQualifierLookup); - mangleUnqualifiedName(nullptr, name, knownArity); + if (qualifier) mangleUnresolvedPrefix(qualifier); + switch (name.getNameKind()) { + // <base-unresolved-name> ::= <simple-id> + case DeclarationName::Identifier: + mangleSourceName(name.getAsIdentifierInfo()); + break; + // <base-unresolved-name> ::= dn <destructor-name> + case DeclarationName::CXXDestructorName: + Out << "dn"; + mangleUnresolvedTypeOrSimpleId(name.getCXXNameType()); + break; + // <base-unresolved-name> ::= on <operator-name> + case DeclarationName::CXXConversionFunctionName: + case DeclarationName::CXXLiteralOperatorName: + case DeclarationName::CXXOperatorName: + Out << "on"; + mangleOperatorName(name, knownArity); + break; + case DeclarationName::CXXConstructorName: + llvm_unreachable("Can't mangle a constructor name!"); + case DeclarationName::CXXUsingDirective: + llvm_unreachable("Can't mangle a using directive name!"); + case DeclarationName::ObjCMultiArgSelector: + case DeclarationName::ObjCOneArgSelector: + case DeclarationName::ObjCZeroArgSelector: + llvm_unreachable("Can't mangle Objective-C selector names here!"); + } } void CXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND, DeclarationName Name, unsigned KnownArity) { + unsigned Arity = KnownArity; // <unqualified-name> ::= <operator-name> // ::= <ctor-dtor-name> // ::= <source-name> @@ -1163,7 +1036,7 @@ void CXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND, Str += llvm::utostr(AnonStructId); Out << Str.size(); - Out << Str.str(); + Out << Str; break; } @@ -1194,33 +1067,19 @@ void CXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND, mangleCXXDtorType(Dtor_Complete); break; - case DeclarationName::CXXConversionFunctionName: - // <operator-name> ::= cv <type> # (cast) - Out << "cv"; - mangleType(Name.getCXXNameType()); - break; - - case DeclarationName::CXXOperatorName: { - unsigned Arity; - if (ND) { + case DeclarationName::CXXOperatorName: + if (ND && Arity == UnknownArity) { Arity = cast<FunctionDecl>(ND)->getNumParams(); - // If we have a C++ member function, we need to include the 'this' pointer. - // FIXME: This does not make sense for operators that are static, but their - // names stay the same regardless of the arity (operator new for instance). - if (isa<CXXMethodDecl>(ND)) - Arity++; - } else - Arity = KnownArity; - - mangleOperatorName(Name.getCXXOverloadedOperator(), Arity); - break; - } - + // If we have a member function, we need to include the 'this' pointer. + if (const auto *MD = dyn_cast<CXXMethodDecl>(ND)) + if (!MD->isStatic()) + Arity++; + } + // FALLTHROUGH + case DeclarationName::CXXConversionFunctionName: case DeclarationName::CXXLiteralOperatorName: - // FIXME: This mangling is not yet official. 
- Out << "li"; - mangleSourceName(Name.getCXXLiteralIdentifier()); + mangleOperatorName(Name, Arity); break; case DeclarationName::CXXUsingDirective: @@ -1529,7 +1388,8 @@ void CXXNameMangler::mangleTemplatePrefix(TemplateName Template) { DependentTemplateName *Dependent = Template.getAsDependentTemplateName(); assert(Dependent && "Unknown template name kind?"); - manglePrefix(Dependent->getQualifier()); + if (NestedNameSpecifier *Qualifier = Dependent->getQualifier()) + manglePrefix(Qualifier); mangleUnscopedTemplateName(Template); } @@ -1591,7 +1451,7 @@ void CXXNameMangler::mangleType(TemplateName TN) { // <class-enum-type> ::= <name> // <name> ::= <nested-name> - mangleUnresolvedPrefix(Dependent->getQualifier(), nullptr); + mangleUnresolvedPrefix(Dependent->getQualifier()); mangleSourceName(Dependent->getIdentifier()); break; } @@ -1620,6 +1480,181 @@ void CXXNameMangler::mangleType(TemplateName TN) { addSubstitution(TN); } +bool CXXNameMangler::mangleUnresolvedTypeOrSimpleId(QualType Ty, + StringRef Prefix) { + // Only certain other types are valid as prefixes; enumerate them. + switch (Ty->getTypeClass()) { + case Type::Builtin: + case Type::Complex: + case Type::Adjusted: + case Type::Decayed: + case Type::Pointer: + case Type::BlockPointer: + case Type::LValueReference: + case Type::RValueReference: + case Type::MemberPointer: + case Type::ConstantArray: + case Type::IncompleteArray: + case Type::VariableArray: + case Type::DependentSizedArray: + case Type::DependentSizedExtVector: + case Type::Vector: + case Type::ExtVector: + case Type::FunctionProto: + case Type::FunctionNoProto: + case Type::Paren: + case Type::Attributed: + case Type::Auto: + case Type::PackExpansion: + case Type::ObjCObject: + case Type::ObjCInterface: + case Type::ObjCObjectPointer: + case Type::Atomic: + llvm_unreachable("type is illegal as a nested name specifier"); + + case Type::SubstTemplateTypeParmPack: + // FIXME: not clear how to mangle this! + // template <class T...> class A { + // template <class U...> void foo(decltype(T::foo(U())) x...); + // }; + Out << "_SUBSTPACK_"; + break; + + // <unresolved-type> ::= <template-param> + // ::= <decltype> + // ::= <template-template-param> <template-args> + // (this last is not official yet) + case Type::TypeOfExpr: + case Type::TypeOf: + case Type::Decltype: + case Type::TemplateTypeParm: + case Type::UnaryTransform: + case Type::SubstTemplateTypeParm: + unresolvedType: + // Some callers want a prefix before the mangled type. + Out << Prefix; + + // This seems to do everything we want. It's not really + // sanctioned for a substituted template parameter, though. + mangleType(Ty); + + // We never want to print 'E' directly after an unresolved-type, + // so we return directly. + return true; + + case Type::Typedef: + mangleSourceName(cast<TypedefType>(Ty)->getDecl()->getIdentifier()); + break; + + case Type::UnresolvedUsing: + mangleSourceName( + cast<UnresolvedUsingType>(Ty)->getDecl()->getIdentifier()); + break; + + case Type::Enum: + case Type::Record: + mangleSourceName(cast<TagType>(Ty)->getDecl()->getIdentifier()); + break; + + case Type::TemplateSpecialization: { + const TemplateSpecializationType *TST = + cast<TemplateSpecializationType>(Ty); + TemplateName TN = TST->getTemplateName(); + switch (TN.getKind()) { + case TemplateName::Template: + case TemplateName::QualifiedTemplate: { + TemplateDecl *TD = TN.getAsTemplateDecl(); + + // If the base is a template template parameter, this is an + // unresolved type. 
+ assert(TD && "no template for template specialization type"); + if (isa<TemplateTemplateParmDecl>(TD)) + goto unresolvedType; + + mangleSourceName(TD->getIdentifier()); + break; + } + + case TemplateName::OverloadedTemplate: + case TemplateName::DependentTemplate: + llvm_unreachable("invalid base for a template specialization type"); + + case TemplateName::SubstTemplateTemplateParm: { + SubstTemplateTemplateParmStorage *subst = + TN.getAsSubstTemplateTemplateParm(); + mangleExistingSubstitution(subst->getReplacement()); + break; + } + + case TemplateName::SubstTemplateTemplateParmPack: { + // FIXME: not clear how to mangle this! + // template <template <class U> class T...> class A { + // template <class U...> void foo(decltype(T<U>::foo) x...); + // }; + Out << "_SUBSTPACK_"; + break; + } + } + + mangleTemplateArgs(TST->getArgs(), TST->getNumArgs()); + break; + } + + case Type::InjectedClassName: + mangleSourceName( + cast<InjectedClassNameType>(Ty)->getDecl()->getIdentifier()); + break; + + case Type::DependentName: + mangleSourceName(cast<DependentNameType>(Ty)->getIdentifier()); + break; + + case Type::DependentTemplateSpecialization: { + const DependentTemplateSpecializationType *DTST = + cast<DependentTemplateSpecializationType>(Ty); + mangleSourceName(DTST->getIdentifier()); + mangleTemplateArgs(DTST->getArgs(), DTST->getNumArgs()); + break; + } + + case Type::Elaborated: + return mangleUnresolvedTypeOrSimpleId( + cast<ElaboratedType>(Ty)->getNamedType(), Prefix); + } + + return false; +} + +void CXXNameMangler::mangleOperatorName(DeclarationName Name, unsigned Arity) { + switch (Name.getNameKind()) { + case DeclarationName::CXXConstructorName: + case DeclarationName::CXXDestructorName: + case DeclarationName::CXXUsingDirective: + case DeclarationName::Identifier: + case DeclarationName::ObjCMultiArgSelector: + case DeclarationName::ObjCOneArgSelector: + case DeclarationName::ObjCZeroArgSelector: + llvm_unreachable("Not an operator name"); + + case DeclarationName::CXXConversionFunctionName: + // <operator-name> ::= cv <type> # (cast) + Out << "cv"; + mangleType(Name.getCXXNameType()); + break; + + case DeclarationName::CXXLiteralOperatorName: + Out << "li"; + mangleSourceName(Name.getCXXLiteralIdentifier()); + return; + + case DeclarationName::CXXOperatorName: + mangleOperatorName(Name.getCXXOverloadedOperator(), Arity); + break; + } +} + + + void CXXNameMangler::mangleOperatorName(OverloadedOperatorKind OO, unsigned Arity) { switch (OO) { @@ -2276,6 +2311,7 @@ void CXXNameMangler::mangleAArch64NeonVectorType(const VectorType *T) { EltName = "Poly16"; break; case BuiltinType::ULong: + case BuiltinType::ULongLong: EltName = "Poly64"; break; default: @@ -2519,6 +2555,29 @@ void CXXNameMangler::mangleIntegerLiteral(QualType T, } +void CXXNameMangler::mangleMemberExprBase(const Expr *Base, bool IsArrow) { + // Ignore member expressions involving anonymous unions. + while (const auto *RT = Base->getType()->getAs<RecordType>()) { + if (!RT->getDecl()->isAnonymousStructOrUnion()) + break; + const auto *ME = dyn_cast<MemberExpr>(Base); + if (!ME) + break; + Base = ME->getBase(); + IsArrow = ME->isArrow(); + } + + if (Base->isImplicitCXXThis()) { + // Note: GCC mangles member expressions to the implicit 'this' as + // *this., whereas we represent them as this->. The Itanium C++ ABI + // does not specify anything here, so we follow GCC. + Out << "dtdefpT"; + } else { + Out << (IsArrow ? "pt" : "dt"); + mangleExpression(Base); + } +} + /// Mangles a member expression. 
void CXXNameMangler::mangleMemberExpr(const Expr *base, bool isArrow, @@ -2528,30 +2587,9 @@ void CXXNameMangler::mangleMemberExpr(const Expr *base, unsigned arity) { // <expression> ::= dt <expression> <unresolved-name> // ::= pt <expression> <unresolved-name> - if (base) { - - // Ignore member expressions involving anonymous unions. - while (const auto *RT = base->getType()->getAs<RecordType>()) { - if (!RT->getDecl()->isAnonymousStructOrUnion()) - break; - const auto *ME = dyn_cast<MemberExpr>(base); - if (!ME) - break; - base = ME->getBase(); - isArrow = ME->isArrow(); - } - - if (base->isImplicitCXXThis()) { - // Note: GCC mangles member expressions to the implicit 'this' as - // *this., whereas we represent them as this->. The Itanium C++ ABI - // does not specify anything here, so we follow GCC. - Out << "dtdefpT"; - } else { - Out << (isArrow ? "pt" : "dt"); - mangleExpression(base); - } - } - mangleUnresolvedName(qualifier, firstQualifierLookup, member, arity); + if (base) + mangleMemberExprBase(base, isArrow); + mangleUnresolvedName(qualifier, member, arity); } /// Look at the callee of the given call expression and determine if @@ -2592,6 +2630,13 @@ void CXXNameMangler::mangleCastExpression(const Expr *E, StringRef CastEncoding) mangleExpression(ECE->getSubExpr()); } +void CXXNameMangler::mangleInitListElements(const InitListExpr *InitList) { + if (auto *Syntactic = InitList->getSyntacticForm()) + InitList = Syntactic; + for (unsigned i = 0, e = InitList->getNumInits(); i != e; ++i) + mangleExpression(InitList->getInit(i)); +} + void CXXNameMangler::mangleExpression(const Expr *E, unsigned Arity) { // <expression> ::= <unary operator-name> <expression> // ::= <binary operator-name> <expression> <expression> @@ -2631,7 +2676,6 @@ recurse: // These all can only appear in local or variable-initialization // contexts and so should never appear in a mangling. case Expr::AddrLabelExprClass: - case Expr::DesignatedInitExprClass: case Expr::ImplicitValueInitExprClass: case Expr::ParenListExprClass: case Expr::LambdaExprClass: @@ -2641,9 +2685,9 @@ recurse: // FIXME: invent manglings for all these. case Expr::BlockExprClass: - case Expr::CXXPseudoDestructorExprClass: case Expr::ChooseExprClass: case Expr::CompoundLiteralExprClass: + case Expr::DesignatedInitExprClass: case Expr::ExtVectorElementExprClass: case Expr::GenericSelectionExprClass: case Expr::ObjCEncodeExprClass: @@ -2713,9 +2757,7 @@ recurse: case Expr::InitListExprClass: { Out << "il"; - const InitListExpr *InitList = cast<InitListExpr>(E); - for (unsigned i = 0, e = InitList->getNumInits(); i != e; ++i) - mangleExpression(InitList->getInit(i)); + mangleInitListElements(cast<InitListExpr>(E)); Out << "E"; break; } @@ -2759,9 +2801,14 @@ recurse: Out << "cl"; } - mangleExpression(CE->getCallee(), CE->getNumArgs()); - for (unsigned I = 0, N = CE->getNumArgs(); I != N; ++I) - mangleExpression(CE->getArg(I)); + unsigned CallArity = CE->getNumArgs(); + for (const Expr *Arg : CE->arguments()) + if (isa<PackExpansionExpr>(Arg)) + CallArity = UnknownArity; + + mangleExpression(CE->getCallee(), CallArity); + for (const Expr *Arg : CE->arguments()) + mangleExpression(Arg); Out << 'E'; break; } @@ -2793,9 +2840,7 @@ recurse: } else if (New->getInitializationStyle() == CXXNewExpr::ListInit && isa<InitListExpr>(Init)) { // Only take InitListExprs apart for list-initialization. 
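As background for the new mangleInitListElements helper: a braced list in a dependent expression mangles as "il" <element expressions> "E", so {1, 2, 3} should come out as ilLi1ELi2ELi3EE once each literal is encoded as Li<n>E. A sketch with made-up names:

    // The argument {1, 2, 3} below is an InitListExpr inside a dependent
    // signature; its elements are mangled between "il" and "E".
    template <typename T>
    auto fill(T &t) -> decltype(t.assign({1, 2, 3}));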
- const InitListExpr *InitList = cast<InitListExpr>(Init); - for (unsigned i = 0, e = InitList->getNumInits(); i != e; ++i) - mangleExpression(InitList->getInit(i)); + mangleInitListElements(cast<InitListExpr>(Init)); } else mangleExpression(Init); } @@ -2803,6 +2848,33 @@ recurse: break; } + case Expr::CXXPseudoDestructorExprClass: { + const auto *PDE = cast<CXXPseudoDestructorExpr>(E); + if (const Expr *Base = PDE->getBase()) + mangleMemberExprBase(Base, PDE->isArrow()); + NestedNameSpecifier *Qualifier = PDE->getQualifier(); + QualType ScopeType; + if (TypeSourceInfo *ScopeInfo = PDE->getScopeTypeInfo()) { + if (Qualifier) { + mangleUnresolvedPrefix(Qualifier, + /*Recursive=*/true); + mangleUnresolvedTypeOrSimpleId(ScopeInfo->getType()); + Out << 'E'; + } else { + Out << "sr"; + if (!mangleUnresolvedTypeOrSimpleId(ScopeInfo->getType())) + Out << 'E'; + } + } else if (Qualifier) { + mangleUnresolvedPrefix(Qualifier); + } + // <base-unresolved-name> ::= dn <destructor-name> + Out << "dn"; + QualType DestroyedType = PDE->getDestroyedType(); + mangleUnresolvedTypeOrSimpleId(DestroyedType); + break; + } + case Expr::MemberExprClass: { const MemberExpr *ME = cast<MemberExpr>(E); mangleMemberExpr(ME->getBase(), ME->isArrow(), @@ -2813,9 +2885,9 @@ recurse: case Expr::UnresolvedMemberExprClass: { const UnresolvedMemberExpr *ME = cast<UnresolvedMemberExpr>(E); - mangleMemberExpr(ME->getBase(), ME->isArrow(), - ME->getQualifier(), nullptr, ME->getMemberName(), - Arity); + mangleMemberExpr(ME->isImplicitAccess() ? nullptr : ME->getBase(), + ME->isArrow(), ME->getQualifier(), nullptr, + ME->getMemberName(), Arity); if (ME->hasExplicitTemplateArgs()) mangleTemplateArgs(ME->getExplicitTemplateArgs()); break; @@ -2824,8 +2896,9 @@ recurse: case Expr::CXXDependentScopeMemberExprClass: { const CXXDependentScopeMemberExpr *ME = cast<CXXDependentScopeMemberExpr>(E); - mangleMemberExpr(ME->getBase(), ME->isArrow(), - ME->getQualifier(), ME->getFirstQualifierFoundInScope(), + mangleMemberExpr(ME->isImplicitAccess() ? 
nullptr : ME->getBase(), + ME->isArrow(), ME->getQualifier(), + ME->getFirstQualifierFoundInScope(), ME->getMember(), Arity); if (ME->hasExplicitTemplateArgs()) mangleTemplateArgs(ME->getExplicitTemplateArgs()); @@ -2834,7 +2907,7 @@ recurse: case Expr::UnresolvedLookupExprClass: { const UnresolvedLookupExpr *ULE = cast<UnresolvedLookupExpr>(E); - mangleUnresolvedName(ULE->getQualifier(), nullptr, ULE->getName(), Arity); + mangleUnresolvedName(ULE->getQualifier(), ULE->getName(), Arity); // All the <unresolved-name> productions end in a // base-unresolved-name, where <template-args> are just tacked @@ -2856,26 +2929,55 @@ recurse: break; } - case Expr::CXXTemporaryObjectExprClass: case Expr::CXXConstructExprClass: { - const CXXConstructExpr *CE = cast<CXXConstructExpr>(E); + const auto *CE = cast<CXXConstructExpr>(E); + if (!CE->isListInitialization() || CE->isStdInitListInitialization()) { + assert( + CE->getNumArgs() >= 1 && + (CE->getNumArgs() == 1 || isa<CXXDefaultArgExpr>(CE->getArg(1))) && + "implicit CXXConstructExpr must have one argument"); + return mangleExpression(cast<CXXConstructExpr>(E)->getArg(0)); + } + Out << "il"; + for (auto *E : CE->arguments()) + mangleExpression(E); + Out << "E"; + break; + } + + case Expr::CXXTemporaryObjectExprClass: { + const auto *CE = cast<CXXTemporaryObjectExpr>(E); unsigned N = CE->getNumArgs(); + bool List = CE->isListInitialization(); - if (CE->isListInitialization()) + if (List) Out << "tl"; else Out << "cv"; mangleType(CE->getType()); - if (N != 1) Out << '_'; - for (unsigned I = 0; I != N; ++I) mangleExpression(CE->getArg(I)); - if (N != 1) Out << 'E'; + if (!List && N != 1) + Out << '_'; + if (CE->isStdInitListInitialization()) { + // We implicitly created a std::initializer_list<T> for the first argument + // of a constructor of type U in an expression of the form U{a, b, c}. + // Strip all the semantic gunk off the initializer list. + auto *SILE = + cast<CXXStdInitializerListExpr>(CE->getArg(0)->IgnoreImplicit()); + auto *ILE = cast<InitListExpr>(SILE->getSubExpr()->IgnoreImplicit()); + mangleInitListElements(ILE); + } else { + for (auto *E : CE->arguments()) + mangleExpression(E); + } + if (List || N != 1) + Out << 'E'; break; } case Expr::CXXScalarValueInitExprClass: - Out <<"cv"; + Out << "cv"; mangleType(E->getType()); - Out <<"_E"; + Out << "_E"; break; case Expr::CXXNoexceptExprClass: @@ -3020,10 +3122,28 @@ recurse: // Fall through to mangle the cast itself. case Expr::CStyleCastExprClass: - case Expr::CXXFunctionalCastExprClass: mangleCastExpression(E, "cv"); break; + case Expr::CXXFunctionalCastExprClass: { + auto *Sub = cast<ExplicitCastExpr>(E)->getSubExpr()->IgnoreImplicit(); + // FIXME: Add isImplicit to CXXConstructExpr. 
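The functional-cast handling continued below distinguishes T(x), mangled as "cv" <type> <expr>, from braced T{...}, mangled as "tl" <type> <expr>* "E". A hypothetical illustration (the names are invented):

    struct Pair { int a, b; };
    int use(Pair);
    // Pair{x, x} is instantiation-dependent, so it appears in the mangling
    // as "tl" <Pair> <x> <x> "E"; Pair(x) alone would use "cv" instead.
    template <typename T>
    auto make(T x) -> decltype(use(Pair{x, x}));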
+ if (auto *CCE = dyn_cast<CXXConstructExpr>(Sub)) + if (CCE->getParenOrBraceRange().isInvalid()) + Sub = CCE->getArg(0)->IgnoreImplicit(); + if (auto *StdInitList = dyn_cast<CXXStdInitializerListExpr>(Sub)) + Sub = StdInitList->getSubExpr()->IgnoreImplicit(); + if (auto *IL = dyn_cast<InitListExpr>(Sub)) { + Out << "tl"; + mangleType(E->getType()); + mangleInitListElements(IL); + Out << "E"; + } else { + mangleCastExpression(E, "cv"); + } + break; + } + case Expr::CXXStaticCastExprClass: mangleCastExpression(E, "sc"); break; @@ -3058,7 +3178,7 @@ recurse: default: // <expr-primary> ::= L <mangled-name> E # external name Out << 'L'; - mangle(D, "_Z"); + mangle(D); Out << 'E'; break; @@ -3101,8 +3221,7 @@ recurse: case Expr::DependentScopeDeclRefExprClass: { const DependentScopeDeclRefExpr *DRE = cast<DependentScopeDeclRefExpr>(E); - mangleUnresolvedName(DRE->getQualifier(), nullptr, DRE->getDeclName(), - Arity); + mangleUnresolvedName(DRE->getQualifier(), DRE->getDeclName(), Arity); // All the <unresolved-name> productions end in a // base-unresolved-name, where <template-args> are just tacked @@ -3327,6 +3446,9 @@ void CXXNameMangler::mangleCXXCtorType(CXXCtorType T) { case Ctor_Comdat: Out << "C5"; break; + case Ctor_DefaultClosure: + case Ctor_CopyingClosure: + llvm_unreachable("closure constructors don't exist for the Itanium ABI!"); } } @@ -3410,8 +3532,8 @@ void CXXNameMangler::mangleTemplateArg(TemplateArgument A) { if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) { const ValueDecl *D = DRE->getDecl(); if (isa<VarDecl>(D) || isa<FunctionDecl>(D)) { - Out << "L"; - mangle(D, "_Z"); + Out << 'L'; + mangle(D); Out << 'E'; break; } @@ -3440,13 +3562,7 @@ void CXXNameMangler::mangleTemplateArg(TemplateArgument A) { Out << 'L'; // References to external entities use the mangled name; if the name would // not normally be manged then mangle it as unqualified. - // - // FIXME: The ABI specifies that external names here should have _Z, but - // gcc leaves this off. - if (compensateMangling) - mangle(D, "_Z"); - else - mangle(D, "Z"); + mangle(D); Out << 'E'; if (compensateMangling) @@ -3524,8 +3640,8 @@ bool CXXNameMangler::mangleSubstitution(const NamedDecl *ND) { return mangleSubstitution(reinterpret_cast<uintptr_t>(ND)); } -/// \brief Determine whether the given type has any qualifiers that are -/// relevant for substitutions. +/// Determine whether the given type has any qualifiers that are relevant for +/// substitutions. static bool hasMangledSubstitutionQualifiers(QualType T) { Qualifiers Qs = T.getQualifiers(); return Qs.getCVRQualifiers() || Qs.hasAddressSpace(); @@ -3571,8 +3687,8 @@ static bool isCharType(QualType T) { T->isSpecificBuiltinType(BuiltinType::Char_U); } -/// isCharSpecialization - Returns whether a given type is a template -/// specialization of a given name with a single argument of type char. +/// Returns whether a given type is a template specialization of a given name +/// with a single argument of type char. static bool isCharSpecialization(QualType T, const char *Name) { if (T.isNull()) return false; @@ -3722,8 +3838,8 @@ void CXXNameMangler::addSubstitution(uintptr_t Ptr) { // -/// \brief Mangles the name of the declaration D and emits that name to the -/// given output stream. +/// Mangles the name of the declaration D and emits that name to the given +/// output stream. /// /// If the declaration D requires a mangled name, this routine will emit that /// mangled name to \p os and return true. 
Otherwise, \p os will be unchanged @@ -3815,8 +3931,7 @@ void ItaniumMangleContextImpl::mangleCXXDtorThunk( Mangler.mangleFunctionEncoding(DD); } -/// mangleGuardVariable - Returns the mangled name for a guard variable -/// for the passed in VarDecl. +/// Returns the mangled name for a guard variable for the passed in VarDecl. void ItaniumMangleContextImpl::mangleStaticGuardVariable(const VarDecl *D, raw_ostream &Out) { // <special-name> ::= GV <object name> # Guard variable for one-time @@ -3845,6 +3960,26 @@ void ItaniumMangleContextImpl::mangleDynamicAtExitDestructor(const VarDecl *D, Mangler.getStream() << D->getName(); } +void ItaniumMangleContextImpl::mangleSEHFilterExpression( + const NamedDecl *EnclosingDecl, raw_ostream &Out) { + CXXNameMangler Mangler(*this, Out); + Mangler.getStream() << "__filt_"; + if (shouldMangleDeclName(EnclosingDecl)) + Mangler.mangle(EnclosingDecl); + else + Mangler.getStream() << EnclosingDecl->getName(); +} + +void ItaniumMangleContextImpl::mangleSEHFinallyBlock( + const NamedDecl *EnclosingDecl, raw_ostream &Out) { + CXXNameMangler Mangler(*this, Out); + Mangler.getStream() << "__fin_"; + if (shouldMangleDeclName(EnclosingDecl)) + Mangler.mangle(EnclosingDecl); + else + Mangler.getStream() << EnclosingDecl->getName(); +} + void ItaniumMangleContextImpl::mangleItaniumThreadLocalInit(const VarDecl *D, raw_ostream &Out) { // <special-name> ::= TH <object name> @@ -3923,6 +4058,22 @@ void ItaniumMangleContextImpl::mangleTypeName(QualType Ty, raw_ostream &Out) { mangleCXXRTTIName(Ty, Out); } +void ItaniumMangleContextImpl::mangleCXXVTableBitSet(const CXXRecordDecl *RD, + raw_ostream &Out) { + Linkage L = RD->getLinkageInternal(); + if (L == InternalLinkage || L == UniqueExternalLinkage) { + // This part of the identifier needs to be unique across all translation + // units in the linked program. The scheme fails if multiple translation + // units are compiled using the same relative source file path, or if + // multiple translation units are built from the same source file. 
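To make the uniqueness caveat concrete (an illustrative assumption, not something the patch asserts): the code below prefixes the identifier of an internal-linkage class with the main file's name, so builds that reuse a relative source path could collide:

    // Hypothetical TU at lib/a.cpp:
    namespace { struct Local { virtual void f(); }; }
    // The bitset identifier becomes "[lib/a.cpp]" followed by the mangling
    // of Local's type; an identical path in another build would clash.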
+ SourceManager &SM = getASTContext().getSourceManager(); + Out << "[" << SM.getFileEntryForID(SM.getMainFileID())->getName() << "]"; + } + + CXXNameMangler Mangler(*this, Out); + Mangler.mangleType(QualType(RD->getTypeForDecl(), 0)); +} + void ItaniumMangleContextImpl::mangleStringLiteral(const StringLiteral *, raw_ostream &) { llvm_unreachable("Can't mangle string literals"); } diff --git a/contrib/llvm/tools/clang/lib/AST/MicrosoftCXXABI.cpp b/contrib/llvm/tools/clang/lib/AST/MicrosoftCXXABI.cpp index 0603d3b..93ff77a 100644 --- a/contrib/llvm/tools/clang/lib/AST/MicrosoftCXXABI.cpp +++ b/contrib/llvm/tools/clang/lib/AST/MicrosoftCXXABI.cpp @@ -31,11 +31,12 @@ class MicrosoftNumberingContext : public MangleNumberingContext { llvm::DenseMap<const Type *, unsigned> ManglingNumbers; unsigned LambdaManglingNumber; unsigned StaticLocalNumber; + unsigned StaticThreadlocalNumber; public: MicrosoftNumberingContext() : MangleNumberingContext(), LambdaManglingNumber(0), - StaticLocalNumber(0) {} + StaticLocalNumber(0), StaticThreadlocalNumber(0) {} unsigned getManglingNumber(const CXXMethodDecl *CallOperator) override { return ++LambdaManglingNumber; @@ -47,6 +48,8 @@ public: } unsigned getStaticLocalNumber(const VarDecl *VD) override { + if (VD->getTLSKind()) + return ++StaticThreadlocalNumber; return ++StaticLocalNumber; } @@ -63,6 +66,10 @@ public: class MicrosoftCXXABI : public CXXABI { ASTContext &Context; + llvm::SmallDenseMap<CXXRecordDecl *, CXXConstructorDecl *> RecordToCopyCtor; + llvm::SmallDenseMap<std::pair<const CXXConstructorDecl *, unsigned>, Expr *> + CtorToDefaultArgExpr; + public: MicrosoftCXXABI(ASTContext &Ctx) : Context(Ctx) { } @@ -82,13 +89,36 @@ public: return false; const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); - + // In the Microsoft ABI, classes can have one or two vtable pointers. 
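A sketch of what the nearly-empty check above accepts; the layouts shown are the usual Microsoft ABI expectations, not something this patch guarantees:

    struct A { virtual void f(); };              // nonvirtual size: one vfptr
    struct B : virtual A { virtual void g(); };  // typically vfptr + vbptr
    struct C { virtual void f(); int n; };       // not nearly empty: has data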
- CharUnits PointerSize = - Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0)); + CharUnits PointerSize = + Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0)); return Layout.getNonVirtualSize() == PointerSize || Layout.getNonVirtualSize() == PointerSize * 2; - } + } + + void addDefaultArgExprForConstructor(const CXXConstructorDecl *CD, + unsigned ParmIdx, Expr *DAE) override { + CtorToDefaultArgExpr[std::make_pair(CD, ParmIdx)] = DAE; + } + + Expr *getDefaultArgExprForConstructor(const CXXConstructorDecl *CD, + unsigned ParmIdx) override { + return CtorToDefaultArgExpr[std::make_pair(CD, ParmIdx)]; + } + + const CXXConstructorDecl * + getCopyConstructorForExceptionObject(CXXRecordDecl *RD) override { + return RecordToCopyCtor[RD]; + } + + void + addCopyConstructorForExceptionObject(CXXRecordDecl *RD, + CXXConstructorDecl *CD) override { + assert(CD != nullptr); + assert(RecordToCopyCtor[RD] == nullptr || RecordToCopyCtor[RD] == CD); + RecordToCopyCtor[RD] = CD; + } MangleNumberingContext *createMangleNumberingContext() const override { return new MicrosoftNumberingContext(); @@ -186,29 +216,28 @@ getMSMemberPointerSlots(const MemberPointerType *MPT) { std::pair<uint64_t, unsigned> MicrosoftCXXABI::getMemberPointerWidthAndAlign( const MemberPointerType *MPT) const { - const TargetInfo &Target = Context.getTargetInfo(); - assert(Target.getTriple().getArch() == llvm::Triple::x86 || - Target.getTriple().getArch() == llvm::Triple::x86_64); - unsigned Ptrs, Ints; - std::tie(Ptrs, Ints) = getMSMemberPointerSlots(MPT); // The nominal struct is laid out with pointers followed by ints and aligned // to a pointer width if any are present and an int width otherwise. + const TargetInfo &Target = Context.getTargetInfo(); unsigned PtrSize = Target.getPointerWidth(0); unsigned IntSize = Target.getIntWidth(); + + unsigned Ptrs, Ints; + std::tie(Ptrs, Ints) = getMSMemberPointerSlots(MPT); uint64_t Width = Ptrs * PtrSize + Ints * IntSize; unsigned Align; // When MSVC does x86_32 record layout, it aligns aggregate member pointers to // 8 bytes. However, __alignof usually returns 4 for data memptrs and 8 for // function memptrs. 
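Those Ptrs/Ints slot counts feed Width = Ptrs * PtrSize + Ints * IntSize below. Under 32-bit MSVC, assuming the standard inheritance models, the commonly observed function member-pointer sizes are:

    // Illustrative expectations for x86; not asserted by the patch.
    struct __single_inheritance S;    // void (S::*)(): 1 ptr          -> 4 bytes
    struct __multiple_inheritance M;  // void (M::*)(): 1 ptr + 1 int  -> 8 bytes
    struct __virtual_inheritance V;   // void (V::*)(): 1 ptr + 2 ints -> 12 bytes
    // The unspecified model (forward-declared class) adds one more int.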
- if (Ptrs + Ints > 1 && Target.getTriple().getArch() == llvm::Triple::x86) - Align = 8 * 8; + if (Ptrs + Ints > 1 && Target.getTriple().isArch32Bit()) + Align = 64; else if (Ptrs) Align = Target.getPointerAlign(0); else Align = Target.getIntAlign(); - if (Target.getTriple().getArch() == llvm::Triple::x86_64) + if (Target.getTriple().isArch64Bit()) Width = llvm::RoundUpToAlignment(Width, Align); return std::make_pair(Width, Align); } diff --git a/contrib/llvm/tools/clang/lib/AST/MicrosoftMangle.cpp b/contrib/llvm/tools/clang/lib/AST/MicrosoftMangle.cpp index 72f90f6..77522c1 100644 --- a/contrib/llvm/tools/clang/lib/AST/MicrosoftMangle.cpp +++ b/contrib/llvm/tools/clang/lib/AST/MicrosoftMangle.cpp @@ -67,11 +67,15 @@ static const DeclContext *getEffectiveParentContext(const DeclContext *DC) { return getEffectiveDeclContext(cast<Decl>(DC)); } -static const FunctionDecl *getStructor(const FunctionDecl *fn) { - if (const FunctionTemplateDecl *ftd = fn->getPrimaryTemplate()) - return ftd->getTemplatedDecl(); +static const FunctionDecl *getStructor(const NamedDecl *ND) { + if (const auto *FTD = dyn_cast<FunctionTemplateDecl>(ND)) + return FTD->getTemplatedDecl(); - return fn; + const auto *FD = cast<FunctionDecl>(ND); + if (const auto *FTD = FD->getPrimaryTemplate()) + return FTD->getTemplatedDecl(); + + return FD; } static bool isLambda(const NamedDecl *ND) { @@ -89,6 +93,8 @@ class MicrosoftMangleContextImpl : public MicrosoftMangleContext { llvm::DenseMap<DiscriminatorKeyTy, unsigned> Discriminator; llvm::DenseMap<const NamedDecl *, unsigned> Uniquifier; llvm::DenseMap<const CXXRecordDecl *, unsigned> LambdaIds; + llvm::DenseMap<const NamedDecl *, unsigned> SEHFilterIds; + llvm::DenseMap<const NamedDecl *, unsigned> SEHFinallyIds; public: MicrosoftMangleContextImpl(ASTContext &Context, DiagnosticsEngine &Diags) @@ -109,6 +115,16 @@ public: void mangleCXXVBTable(const CXXRecordDecl *Derived, ArrayRef<const CXXRecordDecl *> BasePath, raw_ostream &Out) override; + void mangleCXXThrowInfo(QualType T, bool IsConst, bool IsVolatile, + uint32_t NumEntries, raw_ostream &Out) override; + void mangleCXXCatchableTypeArray(QualType T, uint32_t NumEntries, + raw_ostream &Out) override; + void mangleCXXCatchableType(QualType T, const CXXConstructorDecl *CD, + CXXCtorType CT, uint32_t Size, uint32_t NVOffset, + int32_t VBPtrOffset, uint32_t VBIndex, + raw_ostream &Out) override; + void mangleCXXCatchHandlerType(QualType T, uint32_t Flags, + raw_ostream &Out) override; void mangleCXXRTTI(QualType T, raw_ostream &Out) override; void mangleCXXRTTIName(QualType T, raw_ostream &Out) override; void mangleCXXRTTIBaseClassDescriptor(const CXXRecordDecl *Derived, @@ -131,10 +147,18 @@ public: void mangleReferenceTemporary(const VarDecl *, unsigned ManglingNumber, raw_ostream &) override; void mangleStaticGuardVariable(const VarDecl *D, raw_ostream &Out) override; + void mangleThreadSafeStaticGuardVariable(const VarDecl *D, unsigned GuardNum, + raw_ostream &Out) override; void mangleDynamicInitializer(const VarDecl *D, raw_ostream &Out) override; void mangleDynamicAtExitDestructor(const VarDecl *D, raw_ostream &Out) override; + void mangleSEHFilterExpression(const NamedDecl *EnclosingDecl, + raw_ostream &Out) override; + void mangleSEHFinallyBlock(const NamedDecl *EnclosingDecl, + raw_ostream &Out) override; void mangleStringLiteral(const StringLiteral *SL, raw_ostream &Out) override; + void mangleCXXVTableBitSet(const CXXRecordDecl *RD, + raw_ostream &Out) override; bool getNextDiscriminator(const NamedDecl *ND, 
unsigned &disc) { // Lambda closure types are already numbered. if (isLambda(ND)) @@ -211,6 +235,12 @@ public: 64) {} MicrosoftCXXNameMangler(MicrosoftMangleContextImpl &C, raw_ostream &Out_, + const CXXConstructorDecl *D, CXXCtorType Type) + : Context(C), Out(Out_), Structor(getStructor(D)), StructorType(Type), + PointersAre64Bit(C.getASTContext().getTargetInfo().getPointerWidth(0) == + 64) {} + + MicrosoftCXXNameMangler(MicrosoftMangleContextImpl &C, raw_ostream &Out_, const CXXDestructorDecl *D, CXXDtorType Type) : Context(C), Out(Out_), Structor(getStructor(D)), StructorType(Type), PointersAre64Bit(C.getASTContext().getTargetInfo().getPointerWidth(0) == @@ -220,7 +250,7 @@ public: void mangle(const NamedDecl *D, StringRef Prefix = "\01?"); void mangleName(const NamedDecl *ND); - void mangleFunctionEncoding(const FunctionDecl *FD); + void mangleFunctionEncoding(const FunctionDecl *FD, bool ShouldMangle); void mangleVariableEncoding(const VarDecl *VD); void mangleMemberDataPointer(const CXXRecordDecl *RD, const ValueDecl *VD); void mangleMemberFunctionPointer(const CXXRecordDecl *RD, @@ -247,7 +277,7 @@ private: void mangleQualifiers(Qualifiers Quals, bool IsMember); void mangleRefQualifier(RefQualifierKind RefQualifier); void manglePointerCVQualifiers(Qualifiers Quals); - void manglePointerExtQualifiers(Qualifiers Quals, const Type *PointeeType); + void manglePointerExtQualifiers(Qualifiers Quals, QualType PointeeType); void mangleUnscopedTemplateName(const TemplateDecl *ND); void @@ -261,6 +291,7 @@ private: #define ABSTRACT_TYPE(CLASS, PARENT) #define NON_CANONICAL_TYPE(CLASS, PARENT) #define TYPE(CLASS, PARENT) void mangleType(const CLASS##Type *T, \ + Qualifiers Quals, \ SourceRange Range); #include "clang/AST/TypeNodes.def" #undef ABSTRACT_TYPE @@ -271,6 +302,7 @@ private: void mangleDecayedArrayType(const ArrayType *T); void mangleArrayType(const ArrayType *T); void mangleFunctionClass(const FunctionDecl *FD); + void mangleCallingConvention(CallingConv CC); void mangleCallingConvention(const FunctionType *T); void mangleIntegerLiteral(const llvm::APSInt &Number, bool IsBoolean); void mangleExpression(const Expr *E); @@ -352,7 +384,7 @@ void MicrosoftCXXNameMangler::mangle(const NamedDecl *D, StringRef Prefix) { Out << Prefix; mangleName(D); if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) - mangleFunctionEncoding(FD); + mangleFunctionEncoding(FD, Context.shouldMangleDeclName(FD)); else if (const VarDecl *VD = dyn_cast<VarDecl>(D)) mangleVariableEncoding(VD); else { @@ -365,7 +397,8 @@ void MicrosoftCXXNameMangler::mangle(const NamedDecl *D, StringRef Prefix) { } } -void MicrosoftCXXNameMangler::mangleFunctionEncoding(const FunctionDecl *FD) { +void MicrosoftCXXNameMangler::mangleFunctionEncoding(const FunctionDecl *FD, + bool ShouldMangle) { // <type-encoding> ::= <function-class> <function-type> // Since MSVC operates on the type as written and not the canonical type, it @@ -380,13 +413,20 @@ void MicrosoftCXXNameMangler::mangleFunctionEncoding(const FunctionDecl *FD) { // extern "C" functions can hold entities that must be mangled. // As it stands, these functions still need to get expressed in the full // external name. They have their class and type omitted, replaced with '9'. - if (Context.shouldMangleDeclName(FD)) { - // First, the function class. + if (ShouldMangle) { + // We would like to mangle all extern "C" functions using this additional + // component but this would break compatibility with MSVC's behavior. 
+ Instead, do this when we know that compatibility isn't important (in + other words, when it is an overloaded extern "C" function). + if (FD->isExternC() && FD->hasAttr<OverloadableAttr>()) + Out << "$$J0"; + mangleFunctionClass(FD); mangleFunctionType(FT, FD); - } else + } else { Out << '9'; + } } void MicrosoftCXXNameMangler::mangleVariableEncoding(const VarDecl *VD) { @@ -422,7 +462,7 @@ void MicrosoftCXXNameMangler::mangleVariableEncoding(const VarDecl *VD) { Ty->isMemberPointerType()) { mangleType(Ty, SR, QMM_Drop); manglePointerExtQualifiers( - Ty.getDesugaredType(getASTContext()).getLocalQualifiers(), nullptr); + Ty.getDesugaredType(getASTContext()).getLocalQualifiers(), QualType()); if (const MemberPointerType *MPT = Ty->getAs<MemberPointerType>()) { mangleQualifiers(MPT->getPointeeType().getQualifiers(), true); // Member pointers are suffixed with a back reference to the member @@ -525,7 +565,7 @@ MicrosoftCXXNameMangler::mangleMemberFunctionPointer(const CXXRecordDecl *RD, } } else { mangleName(MD); - mangleFunctionEncoding(MD); + mangleFunctionEncoding(MD, /*ShouldMangle=*/true); } } else { // Null single inheritance member functions are encoded as a simple nullptr. @@ -559,7 +599,7 @@ void MicrosoftCXXNameMangler::mangleVirtualMemPtrThunk( Out << "$B"; mangleNumber(OffsetInVFTable); Out << 'A'; - Out << (PointersAre64Bit ? 'A' : 'E'); + mangleCallingConvention(MD->getType()->getAs<FunctionProtoType>()); } void MicrosoftCXXNameMangler::mangleName(const NamedDecl *ND) { @@ -757,12 +797,18 @@ void MicrosoftCXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND, llvm_unreachable("Can't mangle Objective-C selector names here!"); case DeclarationName::CXXConstructorName: - if (ND == Structor) { - assert(StructorType == Ctor_Complete && - "Should never be asked to mangle a ctor other than complete"); + if (Structor == getStructor(ND)) { + if (StructorType == Ctor_CopyingClosure) { + Out << "?_O"; + return; + } + if (StructorType == Ctor_DefaultClosure) { + Out << "?_F"; + return; + } } Out << "?0"; - break; + return; case DeclarationName::CXXDestructorName: if (ND == Structor) @@ -1134,10 +1180,13 @@ void MicrosoftCXXNameMangler::mangleTemplateArg(const TemplateDecl *TD, cast<ValueDecl>(ND)); } else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) { const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD); - if (MD && MD->isInstance()) + if (MD && MD->isInstance()) { mangleMemberFunctionPointer(MD->getParent()->getMostRecentDecl(), MD); - else - mangle(FD, "$1?"); + } else { + Out << "$1?"; + mangleName(FD); + mangleFunctionEncoding(FD, /*ShouldMangle=*/true); + } } else { mangle(ND, TA.getParamTypeForDecl()->isReferenceType() ? "$E?" : "$1?"); } @@ -1171,7 +1220,12 @@ void MicrosoftCXXNameMangler::mangleTemplateArg(const TemplateDecl *TD, if (TemplateArgs.empty()) { if (isa<TemplateTypeParmDecl>(Parm) || isa<TemplateTemplateParmDecl>(Parm)) - Out << "$$V"; + // MSVC 2015 changed the mangling for empty expanded template packs; + // use the old mangling for link compatibility with old versions. + Out << (Context.getASTContext().getLangOpts().isCompatibleWithMSVC( + LangOptions::MSVC2015) + ?
"$$V" + : "$$$V"); else if (isa<NonTypeTemplateParmDecl>(Parm)) Out << "$S"; else @@ -1298,11 +1352,11 @@ MicrosoftCXXNameMangler::mangleRefQualifier(RefQualifierKind RefQualifier) { } } -void -MicrosoftCXXNameMangler::manglePointerExtQualifiers(Qualifiers Quals, - const Type *PointeeType) { +void MicrosoftCXXNameMangler::manglePointerExtQualifiers(Qualifiers Quals, + QualType PointeeType) { bool HasRestrict = Quals.hasRestrict(); - if (PointersAre64Bit && (!PointeeType || !PointeeType->isFunctionType())) + if (PointersAre64Bit && + (PointeeType.isNull() || !PointeeType->isFunctionType())) Out << 'E'; if (HasRestrict) @@ -1338,29 +1392,38 @@ void MicrosoftCXXNameMangler::mangleArgumentType(QualType T, // e.g. // void (*x)(void) will not form a backreference with void x(void) void *TypePtr; - if (const DecayedType *DT = T->getAs<DecayedType>()) { - TypePtr = DT->getOriginalType().getCanonicalType().getAsOpaquePtr(); + if (const auto *DT = T->getAs<DecayedType>()) { + QualType OriginalType = DT->getOriginalType(); + // All decayed ArrayTypes should be treated identically; as-if they were + // a decayed IncompleteArrayType. + if (const auto *AT = getASTContext().getAsArrayType(OriginalType)) + OriginalType = getASTContext().getIncompleteArrayType( + AT->getElementType(), AT->getSizeModifier(), + AT->getIndexTypeCVRQualifiers()); + + TypePtr = OriginalType.getCanonicalType().getAsOpaquePtr(); // If the original parameter was textually written as an array, // instead treat the decayed parameter like it's const. // // e.g. // int [] -> int * const - if (DT->getOriginalType()->isArrayType()) + if (OriginalType->isArrayType()) T = T.withConst(); - } else + } else { TypePtr = T.getCanonicalType().getAsOpaquePtr(); + } ArgBackRefMap::iterator Found = TypeBackReferences.find(TypePtr); if (Found == TypeBackReferences.end()) { - size_t OutSizeBefore = Out.GetNumBytesInBuffer(); + size_t OutSizeBefore = Out.tell(); mangleType(T, Range, QMM_Drop); // See if it's worth creating a back reference. // Only types longer than 1 character are considered // and only 10 back references slots are available: - bool LongerThanOneChar = (Out.GetNumBytesInBuffer() - OutSizeBefore > 1); + bool LongerThanOneChar = (Out.tell() - OutSizeBefore > 1); if (LongerThanOneChar && TypeBackReferences.size() < 10) { size_t Size = TypeBackReferences.size(); TypeBackReferences[TypePtr] = Size; @@ -1388,7 +1451,7 @@ void MicrosoftCXXNameMangler::mangleType(QualType T, SourceRange Range, } bool IsPointer = T->isAnyPointerType() || T->isMemberPointerType() || - T->isBlockPointerType(); + T->isReferenceType() || T->isBlockPointerType(); switch (QMM) { case QMM_Drop: @@ -1415,11 +1478,6 @@ void MicrosoftCXXNameMangler::mangleType(QualType T, SourceRange Range, break; } - // We have to mangle these now, while we still have enough information. 
- if (IsPointer) { - manglePointerCVQualifiers(Quals); - manglePointerExtQualifiers(Quals, T->getPointeeType().getTypePtr()); - } const Type *ty = T.getTypePtr(); switch (ty->getTypeClass()) { @@ -1430,7 +1488,7 @@ void MicrosoftCXXNameMangler::mangleType(QualType T, SourceRange Range, return; #define TYPE(CLASS, PARENT) \ case Type::CLASS: \ - mangleType(cast<CLASS##Type>(ty), Range); \ + mangleType(cast<CLASS##Type>(ty), Quals, Range); \ break; #include "clang/AST/TypeNodes.def" #undef ABSTRACT_TYPE @@ -1439,7 +1497,7 @@ void MicrosoftCXXNameMangler::mangleType(QualType T, SourceRange Range, } } -void MicrosoftCXXNameMangler::mangleType(const BuiltinType *T, +void MicrosoftCXXNameMangler::mangleType(const BuiltinType *T, Qualifiers, SourceRange Range) { // <type> ::= <builtin-type> // <builtin-type> ::= X # void @@ -1525,7 +1583,7 @@ void MicrosoftCXXNameMangler::mangleType(const BuiltinType *T, } // <type> ::= <function-type> -void MicrosoftCXXNameMangler::mangleType(const FunctionProtoType *T, +void MicrosoftCXXNameMangler::mangleType(const FunctionProtoType *T, Qualifiers, SourceRange) { // Structors only appear in decls, so at this point we know it's not a // structor type. @@ -1539,7 +1597,7 @@ void MicrosoftCXXNameMangler::mangleType(const FunctionProtoType *T, } } void MicrosoftCXXNameMangler::mangleType(const FunctionNoProtoType *T, - SourceRange) { + Qualifiers, SourceRange) { llvm_unreachable("Can't mangle K&R function prototypes"); } @@ -1553,24 +1611,34 @@ void MicrosoftCXXNameMangler::mangleFunctionType(const FunctionType *T, SourceRange Range; if (D) Range = D->getSourceRange(); - bool IsStructor = false, HasThisQuals = ForceThisQuals; + bool IsStructor = false, HasThisQuals = ForceThisQuals, IsCtorClosure = false; + CallingConv CC = T->getCallConv(); if (const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(D)) { if (MD->isInstance()) HasThisQuals = true; - if (isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD)) + if (isa<CXXDestructorDecl>(MD)) { + IsStructor = true; + } else if (isa<CXXConstructorDecl>(MD)) { IsStructor = true; + IsCtorClosure = (StructorType == Ctor_CopyingClosure || + StructorType == Ctor_DefaultClosure) && + getStructor(MD) == Structor; + if (IsCtorClosure) + CC = getASTContext().getDefaultCallingConvention( + /*IsVariadic=*/false, /*IsCXXMethod=*/true); + } } // If this is a C++ instance method, mangle the CVR qualifiers for the // this pointer. if (HasThisQuals) { Qualifiers Quals = Qualifiers::fromCVRMask(Proto->getTypeQuals()); - manglePointerExtQualifiers(Quals, /*PointeeType=*/nullptr); + manglePointerExtQualifiers(Quals, /*PointeeType=*/QualType()); mangleRefQualifier(Proto->getRefQualifier()); mangleQualifiers(Quals, /*IsMember=*/false); } - mangleCallingConvention(T); + mangleCallingConvention(CC); // <return-type> ::= <type> // ::= @ # structors (they have no declared return type) @@ -1584,6 +1652,29 @@ void MicrosoftCXXNameMangler::mangleFunctionType(const FunctionType *T, Out << (PointersAre64Bit ? "PEAXI@Z" : "PAXI@Z"); return; } + if (IsCtorClosure) { + // Default constructor closure and copy constructor closure both return + // void. + Out << 'X'; + + if (StructorType == Ctor_DefaultClosure) { + // Default constructor closure always has no arguments. + Out << 'X'; + } else if (StructorType == Ctor_CopyingClosure) { + // Copy constructor closure always takes an unqualified reference. 
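For context on the closure structor types (a hedged sketch; the shape is inferred from the mangling code, not spelled out in the patch): MSVC emits a copying closure when a copy constructor cannot be called with exactly one argument, and the closure takes just the unqualified lvalue reference mangled here:

    // Hypothetical: copying an exception object of type E goes through a
    // "?_O" closure shaped like void(E &) that forwards to the real ctor.
    struct E {
      E(const E &other, int extra = 0);  // real copy ctor has a default arg
    };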
+ mangleArgumentType(getASTContext().getLValueReferenceType( + Proto->getParamType(0) + ->getAs<LValueReferenceType>() + ->getPointeeType(), + /*SpelledAsLValue=*/true), + Range); + Out << '@'; + } else { + llvm_unreachable("unexpected constructor closure!"); + } + Out << 'Z'; + return; + } Out << '@'; } else { QualType ResultType = Proto->getReturnType(); @@ -1608,7 +1699,7 @@ void MicrosoftCXXNameMangler::mangleFunctionType(const FunctionType *T, Out << 'X'; } else { // Happens for function pointer type arguments for example. - for (const QualType Arg : Proto->param_types()) + for (const QualType &Arg : Proto->param_types()) mangleArgumentType(Arg, Range); // <builtin-type> ::= Z # ellipsis if (Proto->isVariadic()) @@ -1673,10 +1764,11 @@ void MicrosoftCXXNameMangler::mangleFunctionClass(const FunctionDecl *FD) { else Out << 'Q'; } - } else + } else { Out << 'Y'; + } } -void MicrosoftCXXNameMangler::mangleCallingConvention(const FunctionType *T) { +void MicrosoftCXXNameMangler::mangleCallingConvention(CallingConv CC) { // <calling-convention> ::= A # __cdecl // ::= B # __export __cdecl // ::= C # __pascal @@ -1693,7 +1785,7 @@ void MicrosoftCXXNameMangler::mangleCallingConvention(const FunctionType *T) { // that keyword. (It didn't actually export them, it just made them so // that they could be in a DLL and somebody from another module could call // them.) - CallingConv CC = T->getCallConv(); + switch (CC) { default: llvm_unreachable("Unsupported CC for mangling"); @@ -1707,6 +1799,9 @@ void MicrosoftCXXNameMangler::mangleCallingConvention(const FunctionType *T) { case CC_X86VectorCall: Out << 'Q'; break; } } +void MicrosoftCXXNameMangler::mangleCallingConvention(const FunctionType *T) { + mangleCallingConvention(T->getCallConv()); +} void MicrosoftCXXNameMangler::mangleThrowSpecification( const FunctionProtoType *FT) { // <throw-spec> ::= Z # throw(...) (default) @@ -1719,7 +1814,7 @@ void MicrosoftCXXNameMangler::mangleThrowSpecification( } void MicrosoftCXXNameMangler::mangleType(const UnresolvedUsingType *T, - SourceRange Range) { + Qualifiers, SourceRange Range) { // Probably should be mangled as a template instantiation; need to see what // VC does first. 
DiagnosticsEngine &Diags = Context.getDiags(); @@ -1734,10 +1829,12 @@ void MicrosoftCXXNameMangler::mangleType(const UnresolvedUsingType *T, // <struct-type> ::= U <name> // <class-type> ::= V <name> // <enum-type> ::= W4 <name> -void MicrosoftCXXNameMangler::mangleType(const EnumType *T, SourceRange) { +void MicrosoftCXXNameMangler::mangleType(const EnumType *T, Qualifiers, + SourceRange) { mangleType(cast<TagType>(T)->getDecl()); } -void MicrosoftCXXNameMangler::mangleType(const RecordType *T, SourceRange) { +void MicrosoftCXXNameMangler::mangleType(const RecordType *T, Qualifiers, + SourceRange) { mangleType(cast<TagType>(T)->getDecl()); } void MicrosoftCXXNameMangler::mangleType(const TagDecl *TD) { @@ -1772,39 +1869,41 @@ void MicrosoftCXXNameMangler::mangleDecayedArrayType(const ArrayType *T) { manglePointerCVQualifiers(T->getElementType().getQualifiers()); mangleType(T->getElementType(), SourceRange()); } -void MicrosoftCXXNameMangler::mangleType(const ConstantArrayType *T, +void MicrosoftCXXNameMangler::mangleType(const ConstantArrayType *T, Qualifiers, SourceRange) { llvm_unreachable("Should have been special cased"); } -void MicrosoftCXXNameMangler::mangleType(const VariableArrayType *T, +void MicrosoftCXXNameMangler::mangleType(const VariableArrayType *T, Qualifiers, SourceRange) { llvm_unreachable("Should have been special cased"); } void MicrosoftCXXNameMangler::mangleType(const DependentSizedArrayType *T, - SourceRange) { + Qualifiers, SourceRange) { llvm_unreachable("Should have been special cased"); } void MicrosoftCXXNameMangler::mangleType(const IncompleteArrayType *T, - SourceRange) { + Qualifiers, SourceRange) { llvm_unreachable("Should have been special cased"); } void MicrosoftCXXNameMangler::mangleArrayType(const ArrayType *T) { QualType ElementTy(T, 0); SmallVector<llvm::APInt, 3> Dimensions; for (;;) { - if (const ConstantArrayType *CAT = - getASTContext().getAsConstantArrayType(ElementTy)) { + if (ElementTy->isConstantArrayType()) { + const ConstantArrayType *CAT = + getASTContext().getAsConstantArrayType(ElementTy); Dimensions.push_back(CAT->getSize()); ElementTy = CAT->getElementType(); + } else if (ElementTy->isIncompleteArrayType()) { + const IncompleteArrayType *IAT = + getASTContext().getAsIncompleteArrayType(ElementTy); + Dimensions.push_back(llvm::APInt(32, 0)); + ElementTy = IAT->getElementType(); } else if (ElementTy->isVariableArrayType()) { const VariableArrayType *VAT = getASTContext().getAsVariableArrayType(ElementTy); - DiagnosticsEngine &Diags = Context.getDiags(); - unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error, - "cannot mangle this variable-length array yet"); - Diags.Report(VAT->getSizeExpr()->getExprLoc(), DiagID) - << VAT->getBracketsRange(); - return; + Dimensions.push_back(llvm::APInt(32, 0)); + ElementTy = VAT->getElementType(); } else if (ElementTy->isDependentSizedArrayType()) { // The dependent expression has to be folded into a constant (TODO). 
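With the rewritten loop, incomplete and variable-length bounds now contribute a zero dimension instead of a "cannot mangle" diagnostic. Roughly, for hypothetical declarations:

    int a[2][3];        // dimensions recorded after 'Y': 2, then 3
    extern int c[][3];  // the incomplete outer bound contributes 0, then 3
    // A VLA bound likewise contributes 0 under the new code path.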
const DependentSizedArrayType *DSAT = @@ -1815,12 +1914,9 @@ void MicrosoftCXXNameMangler::mangleArrayType(const ArrayType *T) { Diags.Report(DSAT->getSizeExpr()->getExprLoc(), DiagID) << DSAT->getBracketsRange(); return; - } else if (const IncompleteArrayType *IAT = - getASTContext().getAsIncompleteArrayType(ElementTy)) { - Dimensions.push_back(llvm::APInt(32, 0)); - ElementTy = IAT->getElementType(); + } else { + break; } - else break; } Out << 'Y'; // <dimension-count> ::= <number> # number of extra dimensions @@ -1833,9 +1929,11 @@ void MicrosoftCXXNameMangler::mangleArrayType(const ArrayType *T) { // <type> ::= <pointer-to-member-type> // <pointer-to-member-type> ::= <pointer-cvr-qualifiers> <cvr-qualifiers> // <class name> <type> -void MicrosoftCXXNameMangler::mangleType(const MemberPointerType *T, +void MicrosoftCXXNameMangler::mangleType(const MemberPointerType *T, Qualifiers Quals, SourceRange Range) { QualType PointeeType = T->getPointeeType(); + manglePointerCVQualifiers(Quals); + manglePointerExtQualifiers(Quals, PointeeType); if (const FunctionProtoType *FPT = PointeeType->getAs<FunctionProtoType>()) { Out << '8'; mangleName(T->getClass()->castAs<RecordType>()->getDecl()); @@ -1848,7 +1946,7 @@ void MicrosoftCXXNameMangler::mangleType(const MemberPointerType *T, } void MicrosoftCXXNameMangler::mangleType(const TemplateTypeParmType *T, - SourceRange Range) { + Qualifiers, SourceRange Range) { DiagnosticsEngine &Diags = Context.getDiags(); unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error, "cannot mangle this template type parameter type yet"); @@ -1856,9 +1954,8 @@ void MicrosoftCXXNameMangler::mangleType(const TemplateTypeParmType *T, << Range; } -void MicrosoftCXXNameMangler::mangleType( - const SubstTemplateTypeParmPackType *T, - SourceRange Range) { +void MicrosoftCXXNameMangler::mangleType(const SubstTemplateTypeParmPackType *T, + Qualifiers, SourceRange Range) { DiagnosticsEngine &Diags = Context.getDiags(); unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error, "cannot mangle this substituted parameter pack yet"); @@ -1869,40 +1966,46 @@ void MicrosoftCXXNameMangler::mangleType( // <type> ::= <pointer-type> // <pointer-type> ::= E? <pointer-cvr-qualifiers> <cvr-qualifiers> <type> // # the E is required for 64-bit non-static pointers -void MicrosoftCXXNameMangler::mangleType(const PointerType *T, +void MicrosoftCXXNameMangler::mangleType(const PointerType *T, Qualifiers Quals, SourceRange Range) { - QualType PointeeTy = T->getPointeeType(); - mangleType(PointeeTy, Range); + QualType PointeeType = T->getPointeeType(); + manglePointerCVQualifiers(Quals); + manglePointerExtQualifiers(Quals, PointeeType); + mangleType(PointeeType, Range); } void MicrosoftCXXNameMangler::mangleType(const ObjCObjectPointerType *T, - SourceRange Range) { + Qualifiers Quals, SourceRange Range) { + QualType PointeeType = T->getPointeeType(); + manglePointerCVQualifiers(Quals); + manglePointerExtQualifiers(Quals, PointeeType); // Object pointers never have qualifiers. Out << 'A'; - manglePointerExtQualifiers(Qualifiers(), T->getPointeeType().getTypePtr()); - mangleType(T->getPointeeType(), Range); + mangleType(PointeeType, Range); } // <type> ::= <reference-type> // <reference-type> ::= A E? 
<cvr-qualifiers> <type> // # the E is required for 64-bit non-static lvalue references void MicrosoftCXXNameMangler::mangleType(const LValueReferenceType *T, - SourceRange Range) { - Out << 'A'; - manglePointerExtQualifiers(Qualifiers(), T->getPointeeType().getTypePtr()); - mangleType(T->getPointeeType(), Range); + Qualifiers Quals, SourceRange Range) { + QualType PointeeType = T->getPointeeType(); + Out << (Quals.hasVolatile() ? 'B' : 'A'); + manglePointerExtQualifiers(Quals, PointeeType); + mangleType(PointeeType, Range); } // <type> ::= <r-value-reference-type> // <r-value-reference-type> ::= $$Q E? <cvr-qualifiers> <type> // # the E is required for 64-bit non-static rvalue references void MicrosoftCXXNameMangler::mangleType(const RValueReferenceType *T, - SourceRange Range) { - Out << "$$Q"; - manglePointerExtQualifiers(Qualifiers(), T->getPointeeType().getTypePtr()); - mangleType(T->getPointeeType(), Range); + Qualifiers Quals, SourceRange Range) { + QualType PointeeType = T->getPointeeType(); + Out << (Quals.hasVolatile() ? "$$R" : "$$Q"); + manglePointerExtQualifiers(Quals, PointeeType); + mangleType(PointeeType, Range); } -void MicrosoftCXXNameMangler::mangleType(const ComplexType *T, +void MicrosoftCXXNameMangler::mangleType(const ComplexType *T, Qualifiers, SourceRange Range) { DiagnosticsEngine &Diags = Context.getDiags(); unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error, @@ -1911,7 +2014,7 @@ void MicrosoftCXXNameMangler::mangleType(const ComplexType *T, << Range; } -void MicrosoftCXXNameMangler::mangleType(const VectorType *T, +void MicrosoftCXXNameMangler::mangleType(const VectorType *T, Qualifiers Quals, SourceRange Range) { const BuiltinType *ET = T->getElementType()->getAs<BuiltinType>(); assert(ET && "vectors with non-builtin elements are unsupported"); @@ -1939,13 +2042,13 @@ void MicrosoftCXXNameMangler::mangleType(const VectorType *T, // our own mangling to handle uses of __vector_size__ on user-specified // types, and for extensions like __v4sf. Out << "T__clang_vec" << T->getNumElements() << '_'; - mangleType(ET, Range); + mangleType(ET, Quals, Range); } Out << "@@"; } -void MicrosoftCXXNameMangler::mangleType(const ExtVectorType *T, +void MicrosoftCXXNameMangler::mangleType(const ExtVectorType *T, Qualifiers, SourceRange Range) { DiagnosticsEngine &Diags = Context.getDiags(); unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error, @@ -1954,7 +2057,7 @@ void MicrosoftCXXNameMangler::mangleType(const ExtVectorType *T, << Range; } void MicrosoftCXXNameMangler::mangleType(const DependentSizedExtVectorType *T, - SourceRange Range) { + Qualifiers, SourceRange Range) { DiagnosticsEngine &Diags = Context.getDiags(); unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error, "cannot mangle this dependent-sized extended vector type yet"); @@ -1962,14 +2065,14 @@ void MicrosoftCXXNameMangler::mangleType(const DependentSizedExtVectorType *T, << Range; } -void MicrosoftCXXNameMangler::mangleType(const ObjCInterfaceType *T, +void MicrosoftCXXNameMangler::mangleType(const ObjCInterfaceType *T, Qualifiers, SourceRange) { // ObjC interfaces have structs underlying them. Out << 'U'; mangleName(T->getDecl()); } -void MicrosoftCXXNameMangler::mangleType(const ObjCObjectType *T, +void MicrosoftCXXNameMangler::mangleType(const ObjCObjectType *T, Qualifiers, SourceRange Range) { // We don't allow overloading by different protocol qualification, // so mangling them isn't necessary. 
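The reference manglings changed above summarize as: lvalue references encode as 'A' ('B' when the propagated qualifiers include volatile) and rvalue references as "$$Q" ("$$R" with volatile). For the plain cases one would expect, on x86, something like:

    void f(int &r);   // parameter encodes as AAH  (ref, no cv, int)
    void g(int &&r);  // parameter encodes as $$QAH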
@@ -1977,20 +2080,23 @@ void MicrosoftCXXNameMangler::mangleType(const ObjCObjectType *T, } void MicrosoftCXXNameMangler::mangleType(const BlockPointerType *T, - SourceRange Range) { + Qualifiers Quals, SourceRange Range) { + QualType PointeeType = T->getPointeeType(); + manglePointerCVQualifiers(Quals); + manglePointerExtQualifiers(Quals, PointeeType); + Out << "_E"; - QualType pointee = T->getPointeeType(); - mangleFunctionType(pointee->castAs<FunctionProtoType>()); + mangleFunctionType(PointeeType->castAs<FunctionProtoType>()); } void MicrosoftCXXNameMangler::mangleType(const InjectedClassNameType *, - SourceRange) { + Qualifiers, SourceRange) { llvm_unreachable("Cannot mangle injected class name type."); } void MicrosoftCXXNameMangler::mangleType(const TemplateSpecializationType *T, - SourceRange Range) { + Qualifiers, SourceRange Range) { DiagnosticsEngine &Diags = Context.getDiags(); unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error, "cannot mangle this template specialization type yet"); @@ -1998,7 +2104,7 @@ void MicrosoftCXXNameMangler::mangleType(const TemplateSpecializationType *T, << Range; } -void MicrosoftCXXNameMangler::mangleType(const DependentNameType *T, +void MicrosoftCXXNameMangler::mangleType(const DependentNameType *T, Qualifiers, SourceRange Range) { DiagnosticsEngine &Diags = Context.getDiags(); unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error, @@ -2008,8 +2114,8 @@ void MicrosoftCXXNameMangler::mangleType(const DependentNameType *T, } void MicrosoftCXXNameMangler::mangleType( - const DependentTemplateSpecializationType *T, - SourceRange Range) { + const DependentTemplateSpecializationType *T, Qualifiers, + SourceRange Range) { DiagnosticsEngine &Diags = Context.getDiags(); unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error, "cannot mangle this dependent template specialization type yet"); @@ -2017,7 +2123,7 @@ void MicrosoftCXXNameMangler::mangleType( << Range; } -void MicrosoftCXXNameMangler::mangleType(const PackExpansionType *T, +void MicrosoftCXXNameMangler::mangleType(const PackExpansionType *T, Qualifiers, SourceRange Range) { DiagnosticsEngine &Diags = Context.getDiags(); unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error, @@ -2026,7 +2132,7 @@ void MicrosoftCXXNameMangler::mangleType(const PackExpansionType *T, << Range; } -void MicrosoftCXXNameMangler::mangleType(const TypeOfType *T, +void MicrosoftCXXNameMangler::mangleType(const TypeOfType *T, Qualifiers, SourceRange Range) { DiagnosticsEngine &Diags = Context.getDiags(); unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error, @@ -2035,7 +2141,7 @@ void MicrosoftCXXNameMangler::mangleType(const TypeOfType *T, << Range; } -void MicrosoftCXXNameMangler::mangleType(const TypeOfExprType *T, +void MicrosoftCXXNameMangler::mangleType(const TypeOfExprType *T, Qualifiers, SourceRange Range) { DiagnosticsEngine &Diags = Context.getDiags(); unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error, @@ -2044,7 +2150,7 @@ void MicrosoftCXXNameMangler::mangleType(const TypeOfExprType *T, << Range; } -void MicrosoftCXXNameMangler::mangleType(const DecltypeType *T, +void MicrosoftCXXNameMangler::mangleType(const DecltypeType *T, Qualifiers, SourceRange Range) { DiagnosticsEngine &Diags = Context.getDiags(); unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error, @@ -2054,7 +2160,7 @@ void MicrosoftCXXNameMangler::mangleType(const DecltypeType *T, } void MicrosoftCXXNameMangler::mangleType(const UnaryTransformType *T, - SourceRange 
Range) { + Qualifiers, SourceRange Range) { DiagnosticsEngine &Diags = Context.getDiags(); unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error, "cannot mangle this unary transform type yet"); @@ -2062,7 +2168,8 @@ void MicrosoftCXXNameMangler::mangleType(const UnaryTransformType *T, << Range; } -void MicrosoftCXXNameMangler::mangleType(const AutoType *T, SourceRange Range) { +void MicrosoftCXXNameMangler::mangleType(const AutoType *T, Qualifiers, + SourceRange Range) { assert(T->getDeducedType().isNull() && "expecting a dependent type!"); DiagnosticsEngine &Diags = Context.getDiags(); @@ -2072,7 +2179,7 @@ void MicrosoftCXXNameMangler::mangleType(const AutoType *T, SourceRange Range) { << Range; } -void MicrosoftCXXNameMangler::mangleType(const AtomicType *T, +void MicrosoftCXXNameMangler::mangleType(const AtomicType *T, Qualifiers, SourceRange Range) { DiagnosticsEngine &Diags = Context.getDiags(); unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error, @@ -2273,6 +2380,74 @@ void MicrosoftMangleContextImpl::mangleCXXRTTIName(QualType T, Mangler.mangleType(T, SourceRange(), MicrosoftCXXNameMangler::QMM_Result); } +void MicrosoftMangleContextImpl::mangleCXXCatchHandlerType(QualType T, + uint32_t Flags, + raw_ostream &Out) { + MicrosoftCXXNameMangler Mangler(*this, Out); + Mangler.getStream() << "llvm.eh.handlertype."; + Mangler.mangleType(T, SourceRange(), MicrosoftCXXNameMangler::QMM_Result); + Mangler.getStream() << '.' << Flags; +} + +void MicrosoftMangleContextImpl::mangleCXXThrowInfo(QualType T, + bool IsConst, + bool IsVolatile, + uint32_t NumEntries, + raw_ostream &Out) { + MicrosoftCXXNameMangler Mangler(*this, Out); + Mangler.getStream() << "_TI"; + if (IsConst) + Mangler.getStream() << 'C'; + if (IsVolatile) + Mangler.getStream() << 'V'; + Mangler.getStream() << NumEntries; + Mangler.mangleType(T, SourceRange(), MicrosoftCXXNameMangler::QMM_Result); +} + +void MicrosoftMangleContextImpl::mangleCXXCatchableTypeArray( + QualType T, uint32_t NumEntries, raw_ostream &Out) { + MicrosoftCXXNameMangler Mangler(*this, Out); + Mangler.getStream() << "_CTA"; + Mangler.getStream() << NumEntries; + Mangler.mangleType(T, SourceRange(), MicrosoftCXXNameMangler::QMM_Result); +} + +void MicrosoftMangleContextImpl::mangleCXXCatchableType( + QualType T, const CXXConstructorDecl *CD, CXXCtorType CT, uint32_t Size, + uint32_t NVOffset, int32_t VBPtrOffset, uint32_t VBIndex, + raw_ostream &Out) { + MicrosoftCXXNameMangler Mangler(*this, Out); + Mangler.getStream() << "_CT"; + + llvm::SmallString<64> RTTIMangling; + { + llvm::raw_svector_ostream Stream(RTTIMangling); + mangleCXXRTTI(T, Stream); + } + Mangler.getStream() << RTTIMangling.substr(1); + + // VS2015 CTP6 omits the copy-constructor in the mangled name. This name is, + // in fact, superfluous but I'm not sure the change was made consciously. + // TODO: Revisit this when VS2015 gets released. 
+ llvm::SmallString<64> CopyCtorMangling; + if (CD) { + llvm::raw_svector_ostream Stream(CopyCtorMangling); + mangleCXXCtor(CD, CT, Stream); + } + Mangler.getStream() << CopyCtorMangling.substr(1); + + Mangler.getStream() << Size; + if (VBPtrOffset == -1) { + if (NVOffset) { + Mangler.getStream() << NVOffset; + } + } else { + Mangler.getStream() << NVOffset; + Mangler.getStream() << VBPtrOffset; + Mangler.getStream() << VBIndex; + } +} + void MicrosoftMangleContextImpl::mangleCXXRTTIBaseClassDescriptor( const CXXRecordDecl *Derived, uint32_t NVOffset, int32_t VBPtrOffset, uint32_t VBTableOffset, uint32_t Flags, raw_ostream &Out) { @@ -2318,6 +2493,28 @@ void MicrosoftMangleContextImpl::mangleCXXRTTICompleteObjectLocator( Mangler.getStream() << '@'; } +void MicrosoftMangleContextImpl::mangleSEHFilterExpression( + const NamedDecl *EnclosingDecl, raw_ostream &Out) { + MicrosoftCXXNameMangler Mangler(*this, Out); + // The function body is in the same comdat as the function with the handler, + // so the numbering here doesn't have to be the same across TUs. + // + // <mangled-name> ::= ?filt$ <filter-number> @0 + Mangler.getStream() << "\01?filt$" << SEHFilterIds[EnclosingDecl]++ << "@0@"; + Mangler.mangleName(EnclosingDecl); +} + +void MicrosoftMangleContextImpl::mangleSEHFinallyBlock( + const NamedDecl *EnclosingDecl, raw_ostream &Out) { + MicrosoftCXXNameMangler Mangler(*this, Out); + // The function body is in the same comdat as the function with the handler, + // so the numbering here doesn't have to be the same across TUs. + // + // <mangled-name> ::= ?fin$ <filter-number> @0 + Mangler.getStream() << "\01?fin$" << SEHFinallyIds[EnclosingDecl]++ << "@0@"; + Mangler.mangleName(EnclosingDecl); +} + void MicrosoftMangleContextImpl::mangleTypeName(QualType T, raw_ostream &Out) { // This is just a made up unique string for the purposes of tbaa. undname // does *not* know how to demangle it. @@ -2329,7 +2526,7 @@ void MicrosoftMangleContextImpl::mangleTypeName(QualType T, raw_ostream &Out) { void MicrosoftMangleContextImpl::mangleCXXCtor(const CXXConstructorDecl *D, CXXCtorType Type, raw_ostream &Out) { - MicrosoftCXXNameMangler mangler(*this, Out); + MicrosoftCXXNameMangler mangler(*this, Out, D, Type); mangler.mangle(D); } @@ -2348,18 +2545,18 @@ void MicrosoftMangleContextImpl::mangleReferenceTemporary(const VarDecl *VD, getDiags().Report(VD->getLocation(), DiagID); } +void MicrosoftMangleContextImpl::mangleThreadSafeStaticGuardVariable( + const VarDecl *VD, unsigned GuardNum, raw_ostream &Out) { + MicrosoftCXXNameMangler Mangler(*this, Out); + + Mangler.getStream() << "\01?$TSS" << GuardNum << '@'; + Mangler.mangleNestedName(VD); +} + void MicrosoftMangleContextImpl::mangleStaticGuardVariable(const VarDecl *VD, raw_ostream &Out) { - // TODO: This is not correct, especially with respect to VS "14". VS "14" - // utilizes thread local variables to implement thread safe, re-entrant - // initialization for statics. They no longer differentiate between an - // externally visible and non-externally visible static with respect to - // mangling, they all get $TSS <number>. - // - // N.B. This means that they can get more than 32 static variable guards in a - // scope. It also means that they broke compatibility with their own ABI. 
- // <guard-name> ::= ?_B <postfix> @5 <scope-depth> + // ::= ?__J <postfix> @5 <scope-depth> // ::= ?$S <guard-num> @ <postfix> @4IA // The first mangling is what MSVC uses to guard static locals in inline @@ -2371,8 +2568,11 @@ void MicrosoftMangleContextImpl::mangleStaticGuardVariable(const VarDecl *VD, MicrosoftCXXNameMangler Mangler(*this, Out); bool Visible = VD->isExternallyVisible(); - // <operator-name> ::= ?_B # local static guard - Mangler.getStream() << (Visible ? "\01??_B" : "\01?$S1@"); + if (Visible) { + Mangler.getStream() << (VD->getTLSKind() ? "\01??__J" : "\01??_B"); + } else { + Mangler.getStream() << "\01?$S1@"; + } unsigned ScopeDepth = 0; if (Visible && !getNextDiscriminator(VD, ScopeDepth)) // If we do not have a discriminator and are emitting a guard variable for @@ -2553,6 +2753,11 @@ void MicrosoftMangleContextImpl::mangleStringLiteral(const StringLiteral *SL, Mangler.getStream() << '@'; } +void MicrosoftMangleContextImpl::mangleCXXVTableBitSet(const CXXRecordDecl *RD, + raw_ostream &Out) { + llvm::report_fatal_error("Cannot mangle bitsets yet"); +} + MicrosoftMangleContext * MicrosoftMangleContext::create(ASTContext &Context, DiagnosticsEngine &Diags) { return new MicrosoftMangleContextImpl(Context, Diags); diff --git a/contrib/llvm/tools/clang/lib/AST/NSAPI.cpp b/contrib/llvm/tools/clang/lib/AST/NSAPI.cpp index 3dc750a..2749100 100644 --- a/contrib/llvm/tools/clang/lib/AST/NSAPI.cpp +++ b/contrib/llvm/tools/clang/lib/AST/NSAPI.cpp @@ -27,7 +27,10 @@ IdentifierInfo *NSAPI::getNSClassId(NSClassIdKindKind K) const { "NSMutableArray", "NSDictionary", "NSMutableDictionary", - "NSNumber" + "NSNumber", + "NSMutableSet", + "NSCountedSet", + "NSMutableOrderedSet" }; if (!ClassIds[K]) @@ -124,6 +127,25 @@ Selector NSAPI::getNSArraySelector(NSArrayMethodKind MK) const { Sel = Ctx.Selectors.getSelector(2, KeyIdents); break; } + case NSMutableArr_addObject: + Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("addObject")); + break; + case NSMutableArr_insertObjectAtIndex: { + IdentifierInfo *KeyIdents[] = { + &Ctx.Idents.get("insertObject"), + &Ctx.Idents.get("atIndex") + }; + Sel = Ctx.Selectors.getSelector(2, KeyIdents); + break; + } + case NSMutableArr_setObjectAtIndexedSubscript: { + IdentifierInfo *KeyIdents[] = { + &Ctx.Idents.get("setObject"), + &Ctx.Idents.get("atIndexedSubscript") + }; + Sel = Ctx.Selectors.getSelector(2, KeyIdents); + break; + } } return (NSArraySelectors[MK] = Sel); } @@ -209,6 +231,22 @@ Selector NSAPI::getNSDictionarySelector( Sel = Ctx.Selectors.getSelector(2, KeyIdents); break; } + case NSMutableDict_setObjectForKeyedSubscript: { + IdentifierInfo *KeyIdents[] = { + &Ctx.Idents.get("setObject"), + &Ctx.Idents.get("forKeyedSubscript") + }; + Sel = Ctx.Selectors.getSelector(2, KeyIdents); + break; + } + case NSMutableDict_setValueForKey: { + IdentifierInfo *KeyIdents[] = { + &Ctx.Idents.get("setValue"), + &Ctx.Idents.get("forKey") + }; + Sel = Ctx.Selectors.getSelector(2, KeyIdents); + break; + } } return (NSDictionarySelectors[MK] = Sel); } @@ -227,6 +265,63 @@ NSAPI::getNSDictionaryMethodKind(Selector Sel) { return None; } +Selector NSAPI::getNSSetSelector(NSSetMethodKind MK) const { + if (NSSetSelectors[MK].isNull()) { + Selector Sel; + switch (MK) { + case NSMutableSet_addObject: + Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("addObject")); + break; + case NSOrderedSet_insertObjectAtIndex: { + IdentifierInfo *KeyIdents[] = { + &Ctx.Idents.get("insertObject"), + &Ctx.Idents.get("atIndex") + }; + Sel = Ctx.Selectors.getSelector(2, 
KeyIdents); + break; + } + case NSOrderedSet_setObjectAtIndex: { + IdentifierInfo *KeyIdents[] = { + &Ctx.Idents.get("setObject"), + &Ctx.Idents.get("atIndex") + }; + Sel = Ctx.Selectors.getSelector(2, KeyIdents); + break; + } + case NSOrderedSet_setObjectAtIndexedSubscript: { + IdentifierInfo *KeyIdents[] = { + &Ctx.Idents.get("setObject"), + &Ctx.Idents.get("atIndexedSubscript") + }; + Sel = Ctx.Selectors.getSelector(2, KeyIdents); + break; + } + case NSOrderedSet_replaceObjectAtIndexWithObject: { + IdentifierInfo *KeyIdents[] = { + &Ctx.Idents.get("replaceObjectAtIndex"), + &Ctx.Idents.get("withObject") + }; + Sel = Ctx.Selectors.getSelector(2, KeyIdents); + break; + } + } + return (NSSetSelectors[MK] = Sel); + } + + return NSSetSelectors[MK]; +} + +Optional<NSAPI::NSSetMethodKind> +NSAPI::getNSSetMethodKind(Selector Sel) { + for (unsigned i = 0; i != NumNSSetMethods; ++i) { + NSSetMethodKind MK = NSSetMethodKind(i); + if (Sel == getNSSetSelector(MK)) + return MK; + } + + return None; +} + Selector NSAPI::getNSNumberLiteralSelector(NSNumberLiteralMethodKind MK, bool Instance) const { static const char *ClassSelectorName[NumNSNumberLiteralMethods] = { @@ -410,6 +505,11 @@ StringRef NSAPI::GetNSIntegralKind(QualType T) const { return StringRef(); } +bool NSAPI::isMacroDefined(StringRef Id) const { + // FIXME: Check whether the relevant module macros are visible. + return Ctx.Idents.get(Id).hasMacroDefinition(); +} + bool NSAPI::isObjCTypedef(QualType T, StringRef name, IdentifierInfo *&II) const { if (!Ctx.getLangOpts().ObjC1) diff --git a/contrib/llvm/tools/clang/lib/AST/RecordLayoutBuilder.cpp b/contrib/llvm/tools/clang/lib/AST/RecordLayoutBuilder.cpp index 0d070a4..2101a55 100644 --- a/contrib/llvm/tools/clang/lib/AST/RecordLayoutBuilder.cpp +++ b/contrib/llvm/tools/clang/lib/AST/RecordLayoutBuilder.cpp @@ -55,6 +55,52 @@ struct BaseSubobjectInfo { const BaseSubobjectInfo *Derived; }; +/// \brief Externally provided layout. Typically used when the AST source, such +/// as DWARF, lacks all the information that was available at compile time, such +/// as alignment attributes on fields and pragmas in effect. +struct ExternalLayout { + ExternalLayout() : Size(0), Align(0) {} + + /// \brief Overall record size in bits. + uint64_t Size; + + /// \brief Overall record alignment in bits. + uint64_t Align; + + /// \brief Record field offsets in bits. + llvm::DenseMap<const FieldDecl *, uint64_t> FieldOffsets; + + /// \brief Direct, non-virtual base offsets. + llvm::DenseMap<const CXXRecordDecl *, CharUnits> BaseOffsets; + + /// \brief Virtual base offsets. + llvm::DenseMap<const CXXRecordDecl *, CharUnits> VirtualBaseOffsets; + + /// Get the offset of the given field. The external source must provide + /// entries for all fields in the record. + uint64_t getExternalFieldOffset(const FieldDecl *FD) { + assert(FieldOffsets.count(FD) && + "Field does not have an external offset"); + return FieldOffsets[FD]; + } + + bool getExternalNVBaseOffset(const CXXRecordDecl *RD, CharUnits &BaseOffset) { + auto Known = BaseOffsets.find(RD); + if (Known == BaseOffsets.end()) + return false; + BaseOffset = Known->second; + return true; + } + + bool getExternalVBaseOffset(const CXXRecordDecl *RD, CharUnits &BaseOffset) { + auto Known = VirtualBaseOffsets.find(RD); + if (Known == VirtualBaseOffsets.end()) + return false; + BaseOffset = Known->second; + return true; + } +}; + /// EmptySubobjectMap - Keeps track of which empty subobjects exist at different /// offsets while laying out a C++ class. 
class EmptySubobjectMap { @@ -541,7 +587,7 @@ protected: /// \brief Whether the external AST source has provided a layout for this /// record. - unsigned ExternalLayout : 1; + unsigned UseExternalLayout : 1; /// \brief Whether we need to infer alignment, even when we have an /// externally-provided layout. @@ -607,26 +653,14 @@ protected: /// avoid visiting virtual bases more than once. llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBases; - /// \brief Externally-provided size. - uint64_t ExternalSize; - - /// \brief Externally-provided alignment. - uint64_t ExternalAlign; - - /// \brief Externally-provided field offsets. - llvm::DenseMap<const FieldDecl *, uint64_t> ExternalFieldOffsets; - - /// \brief Externally-provided direct, non-virtual base offsets. - llvm::DenseMap<const CXXRecordDecl *, CharUnits> ExternalBaseOffsets; - - /// \brief Externally-provided virtual base offsets. - llvm::DenseMap<const CXXRecordDecl *, CharUnits> ExternalVirtualBaseOffsets; + /// Valid if UseExternalLayout is true. + ExternalLayout External; RecordLayoutBuilder(const ASTContext &Context, EmptySubobjectMap *EmptySubobjects) : Context(Context), EmptySubobjects(EmptySubobjects), Size(0), Alignment(CharUnits::One()), UnpackedAlignment(CharUnits::One()), - ExternalLayout(false), InferAlignment(false), + UseExternalLayout(false), InferAlignment(false), Packed(false), IsUnion(false), IsMac68kAlign(false), IsMsStruct(false), UnfilledBitsInLastUnit(0), LastBitfieldTypeSize(0), MaxFieldAlignment(CharUnits::Zero()), @@ -748,8 +782,8 @@ protected: void setDataSize(CharUnits NewSize) { DataSize = Context.toBits(NewSize); } void setDataSize(uint64_t NewSize) { DataSize = NewSize; } - RecordLayoutBuilder(const RecordLayoutBuilder &) LLVM_DELETED_FUNCTION; - void operator=(const RecordLayoutBuilder &) LLVM_DELETED_FUNCTION; + RecordLayoutBuilder(const RecordLayoutBuilder &) = delete; + void operator=(const RecordLayoutBuilder &) = delete; }; } // end anonymous namespace @@ -1134,21 +1168,12 @@ CharUnits RecordLayoutBuilder::LayoutBase(const BaseSubobjectInfo *Base) { // Query the external layout to see if it provides an offset. bool HasExternalLayout = false; - if (ExternalLayout) { + if (UseExternalLayout) { llvm::DenseMap<const CXXRecordDecl *, CharUnits>::iterator Known; - if (Base->IsVirtual) { - Known = ExternalVirtualBaseOffsets.find(Base->Class); - if (Known != ExternalVirtualBaseOffsets.end()) { - Offset = Known->second; - HasExternalLayout = true; - } - } else { - Known = ExternalBaseOffsets.find(Base->Class); - if (Known != ExternalBaseOffsets.end()) { - Offset = Known->second; - HasExternalLayout = true; - } - } + if (Base->IsVirtual) + HasExternalLayout = External.getExternalVBaseOffset(Base->Class, Offset); + else + HasExternalLayout = External.getExternalNVBaseOffset(Base->Class, Offset); } CharUnits UnpackedBaseAlign = Layout.getNonVirtualAlignment(); @@ -1235,18 +1260,15 @@ void RecordLayoutBuilder::InitializeLayout(const Decl *D) { // If there is an external AST source, ask it for the various offsets.
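// [Editor's note -- annotation, not part of the patch. This query is the only
// way the new ExternalLayout struct gets populated: a consumer such as a
// debugger's DWARF-backed AST source overrides
// ExternalASTSource::layoutRecordType() and fills in the maps. A minimal
// sketch under that assumption; the constant offsets are hypothetical
// stand-ins for values recovered from debug info:
//
//   class DebugInfoLayoutSource : public ExternalASTSource {
//     bool layoutRecordType(
//         const RecordDecl *RD, uint64_t &Size, uint64_t &Alignment,
//         llvm::DenseMap<const FieldDecl *, uint64_t> &FieldOffsets,
//         llvm::DenseMap<const CXXRecordDecl *, CharUnits> &BaseOffsets,
//         llvm::DenseMap<const CXXRecordDecl *, CharUnits> &VirtualBaseOffsets)
//         override {
//       // Size, Alignment and FieldOffsets are in bits; base offsets are in
//       // CharUnits. Returning false means "lay out normally".
//       Size = 64;
//       Alignment = 32;
//       for (const FieldDecl *FD : RD->fields())
//         FieldOffsets[FD] = 32 * FD->getFieldIndex();
//       return true;
//     }
//   };
//
// End note.]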
if (const RecordDecl *RD = dyn_cast<RecordDecl>(D)) - if (ExternalASTSource *External = Context.getExternalSource()) { - ExternalLayout = External->layoutRecordType(RD, - ExternalSize, - ExternalAlign, - ExternalFieldOffsets, - ExternalBaseOffsets, - ExternalVirtualBaseOffsets); - + if (ExternalASTSource *Source = Context.getExternalSource()) { + UseExternalLayout = Source->layoutRecordType( + RD, External.Size, External.Align, External.FieldOffsets, + External.BaseOffsets, External.VirtualBaseOffsets); + // Update based on external alignment. - if (ExternalLayout) { - if (ExternalAlign > 0) { - Alignment = Context.toCharUnitsFromBits(ExternalAlign); + if (UseExternalLayout) { + if (External.Align > 0) { + Alignment = Context.toCharUnitsFromBits(External.Align); } else { // The external source didn't have alignment information; infer it. InferAlignment = true; @@ -1588,7 +1610,7 @@ void RecordLayoutBuilder::LayoutBitField(const FieldDecl *D) { // If we're using external layout, give the external layout a chance // to override this information. - if (ExternalLayout) + if (UseExternalLayout) FieldOffset = updateExternalFieldOffset(D, FieldOffset); // Okay, place the bitfield at the calculated offset. @@ -1604,7 +1626,7 @@ void RecordLayoutBuilder::LayoutBitField(const FieldDecl *D) { FieldAlign = UnpackedFieldAlign = 1; // Diagnose differences in layout due to padding or packing. - if (!ExternalLayout) + if (!UseExternalLayout) CheckFieldPadding(FieldOffset, UnpaddedFieldOffset, UnpackedFieldOffset, UnpackedFieldAlign, FieldPacked, D); @@ -1727,7 +1749,7 @@ void RecordLayoutBuilder::LayoutField(const FieldDecl *D, UnpackedFieldOffset = UnpackedFieldOffset.RoundUpToAlignment(UnpackedFieldAlign); - if (ExternalLayout) { + if (UseExternalLayout) { FieldOffset = Context.toCharUnitsFromBits( updateExternalFieldOffset(D, Context.toBits(FieldOffset))); @@ -1750,7 +1772,7 @@ void RecordLayoutBuilder::LayoutField(const FieldDecl *D, // Place this field at the current location. FieldOffsets.push_back(Context.toBits(FieldOffset)); - if (!ExternalLayout) + if (!UseExternalLayout) CheckFieldPadding(Context.toBits(FieldOffset), UnpaddedFieldOffset, Context.toBits(UnpackedFieldOffset), Context.toBits(UnpackedFieldAlign), FieldPacked, D); @@ -1802,15 +1824,15 @@ void RecordLayoutBuilder::FinishLayout(const NamedDecl *D) { uint64_t RoundedSize = llvm::RoundUpToAlignment(getSizeInBits(), Context.toBits(Alignment)); - if (ExternalLayout) { + if (UseExternalLayout) { // If we're inferring alignment, and the external size is smaller than // our size after we've rounded up to alignment, conservatively set the // alignment to 1. - if (InferAlignment && ExternalSize < RoundedSize) { + if (InferAlignment && External.Size < RoundedSize) { Alignment = CharUnits::One(); InferAlignment = false; } - setSize(ExternalSize); + setSize(External.Size); return; } @@ -1846,18 +1868,18 @@ void RecordLayoutBuilder::UpdateAlignment(CharUnits NewAlignment, CharUnits UnpackedNewAlignment) { // The alignment is not modified when using 'mac68k' alignment or when // we have an externally-supplied layout that also provides overall alignment. 
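// [Editor's note -- annotation, not part of the patch. InferAlignment handles
// external sources that report sizes and field offsets but no alignment.
// Worked example with hypothetical numbers: for
//
//   struct S { char c; int i; };   // computed layout: i at bit 32, size 64 bits
//
// an external description of a pack(1) build would report i at bit offset 8
// and an overall size of 40 bits. updateExternalFieldOffset() sees the
// external offset (8) fall below the computed one (32) and conservatively
// treats the record as packed, and FinishLayout() above likewise drops the
// inferred alignment to 1 when External.Size is smaller than the size rounded
// up to that alignment. End note.]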
- if (IsMac68kAlign || (ExternalLayout && !InferAlignment)) + if (IsMac68kAlign || (UseExternalLayout && !InferAlignment)) return; if (NewAlignment > Alignment) { - assert(llvm::isPowerOf2_32(NewAlignment.getQuantity() && - "Alignment not a power of 2")); + assert(llvm::isPowerOf2_64(NewAlignment.getQuantity()) && + "Alignment not a power of 2"); Alignment = NewAlignment; } if (UnpackedNewAlignment > UnpackedAlignment) { - assert(llvm::isPowerOf2_32(UnpackedNewAlignment.getQuantity() && - "Alignment not a power of 2")); + assert(llvm::isPowerOf2_64(UnpackedNewAlignment.getQuantity()) && + "Alignment not a power of 2"); UnpackedAlignment = UnpackedNewAlignment; } } @@ -1865,11 +1887,8 @@ void RecordLayoutBuilder::UpdateAlignment(CharUnits NewAlignment, uint64_t RecordLayoutBuilder::updateExternalFieldOffset(const FieldDecl *Field, uint64_t ComputedOffset) { - assert(ExternalFieldOffsets.find(Field) != ExternalFieldOffsets.end() && - "Field does not have an external offset"); - - uint64_t ExternalFieldOffset = ExternalFieldOffsets[Field]; - + uint64_t ExternalFieldOffset = External.getExternalFieldOffset(Field); + if (InferAlignment && ExternalFieldOffset < ComputedOffset) { // The externally-supplied field offset is before the field offset we // computed. Assume that the structure is packed. @@ -2152,9 +2171,8 @@ struct MicrosoftRecordLayoutBuilder { typedef llvm::DenseMap<const CXXRecordDecl *, CharUnits> BaseOffsetsMapTy; MicrosoftRecordLayoutBuilder(const ASTContext &Context) : Context(Context) {} private: - MicrosoftRecordLayoutBuilder(const MicrosoftRecordLayoutBuilder &) - LLVM_DELETED_FUNCTION; - void operator=(const MicrosoftRecordLayoutBuilder &) LLVM_DELETED_FUNCTION; + MicrosoftRecordLayoutBuilder(const MicrosoftRecordLayoutBuilder &) = delete; + void operator=(const MicrosoftRecordLayoutBuilder &) = delete; public: void layout(const RecordDecl *RD); void cxxLayout(const CXXRecordDecl *RD); @@ -2252,6 +2270,13 @@ public: /// \brief True if this class is zero sized or first base is zero sized or /// has this property. Only used for MS-ABI. bool LeadsWithZeroSizedBase : 1; + + /// \brief True if the external AST source provided a layout for this record. + bool UseExternalLayout : 1; + + /// \brief The layout provided by the external AST source. Only active if + /// UseExternalLayout is true. + ExternalLayout External; }; } // namespace @@ -2354,8 +2379,9 @@ void MicrosoftRecordLayoutBuilder::initializeLayout(const RecordDecl *RD) { // In 64-bit mode we always perform an alignment step after laying out vbases. // In 32-bit mode we do not. The check to see if we need to perform alignment // checks the RequiredAlignment field and performs alignment if it isn't 0. - RequiredAlignment = Context.getTargetInfo().getPointerWidth(0) == 64 ? - CharUnits::One() : CharUnits::Zero(); + RequiredAlignment = Context.getTargetInfo().getTriple().isArch64Bit() + ? CharUnits::One() + : CharUnits::Zero(); // Compute the maximum field alignment. MaxFieldAlignment = CharUnits::Zero(); // Honor the default struct packing maximum alignment flag. @@ -2371,6 +2397,13 @@ void MicrosoftRecordLayoutBuilder::initializeLayout(const RecordDecl *RD) { // Packed attribute forces max field alignment to be 1. if (RD->hasAttr<PackedAttr>()) MaxFieldAlignment = CharUnits::One(); + + // Try to respect the external layout if present. 
+ UseExternalLayout = false; + if (ExternalASTSource *Source = Context.getExternalSource()) + UseExternalLayout = Source->layoutRecordType( + RD, External.Size, External.Align, External.FieldOffsets, + External.BaseOffsets, External.VirtualBaseOffsets); } void @@ -2385,7 +2418,8 @@ MicrosoftRecordLayoutBuilder::initializeCXXLayout(const CXXRecordDecl *RD) { // injection. PointerInfo.Size = Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0)); - PointerInfo.Alignment = PointerInfo.Size; + PointerInfo.Alignment = + Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerAlign(0)); // Respect pragma pack. if (!MaxFieldAlignment.isZero()) PointerInfo.Alignment = std::min(PointerInfo.Alignment, MaxFieldAlignment); @@ -2475,7 +2509,18 @@ void MicrosoftRecordLayoutBuilder::layoutNonVirtualBase( BaseLayout.leadsWithZeroSizedBase()) Size++; ElementInfo Info = getAdjustedElementInfo(BaseLayout); - CharUnits BaseOffset = Size.RoundUpToAlignment(Info.Alignment); + CharUnits BaseOffset; + + // Respect the external AST source base offset, if present. + bool FoundBase = false; + if (UseExternalLayout) { + FoundBase = External.getExternalNVBaseOffset(BaseDecl, BaseOffset); + if (FoundBase) + assert(BaseOffset >= Size && "base offset already allocated"); + } + + if (!FoundBase) + BaseOffset = Size.RoundUpToAlignment(Info.Alignment); Bases.insert(std::make_pair(BaseDecl, BaseOffset)); Size = BaseOffset + BaseLayout.getNonVirtualSize(); PreviousBaseLayout = &BaseLayout; @@ -2499,7 +2544,14 @@ void MicrosoftRecordLayoutBuilder::layoutField(const FieldDecl *FD) { placeFieldAtOffset(CharUnits::Zero()); Size = std::max(Size, Info.Size); } else { - CharUnits FieldOffset = Size.RoundUpToAlignment(Info.Alignment); + CharUnits FieldOffset; + if (UseExternalLayout) { + FieldOffset = + Context.toCharUnitsFromBits(External.getExternalFieldOffset(FD)); + assert(FieldOffset >= Size && "field offset already allocated"); + } else { + FieldOffset = Size.RoundUpToAlignment(Info.Alignment); + } placeFieldAtOffset(FieldOffset); Size = FieldOffset + Info.Size; } @@ -2573,14 +2625,16 @@ void MicrosoftRecordLayoutBuilder::injectVBPtr(const CXXRecordDecl *RD) { CharUnits InjectionSite = VBPtrOffset; // But before we do, make sure it's properly aligned. VBPtrOffset = VBPtrOffset.RoundUpToAlignment(PointerInfo.Alignment); + // Shift everything after the vbptr down, unless we're using an external + // layout. + if (UseExternalLayout) + return; // Determine where the first field should be laid out after the vbptr. CharUnits FieldStart = VBPtrOffset + PointerInfo.Size; // Make sure that the amount we push the fields back by is a multiple of the // alignment. CharUnits Offset = (FieldStart - InjectionSite).RoundUpToAlignment( std::max(RequiredAlignment, Alignment)); - // Increase the size of the object and push back all fields by the offset - // amount. Size += Offset; for (uint64_t &FieldOffset : FieldOffsets) FieldOffset += Context.toBits(Offset); @@ -2647,7 +2701,18 @@ void MicrosoftRecordLayoutBuilder::layoutVirtualBases(const CXXRecordDecl *RD) { } // Insert the virtual base. ElementInfo Info = getAdjustedElementInfo(BaseLayout); - CharUnits BaseOffset = Size.RoundUpToAlignment(Info.Alignment); + CharUnits BaseOffset; + + // Respect the external AST source base offset, if present. 
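// [Editor's note -- annotation, not part of the patch. The early return added
// to injectVBPtr() above pairs with these lookups: when an external source
// describes the record, its field offsets already include any displacement
// the vbptr introduced when the record was originally built, so shifting the
// fields a second time would double-count it. Illustrative 32-bit MS ABI
// case:
//
//   struct A { virtual void f(); };
//   struct B : virtual A { int x; };   // vbptr at offset 0, x at offset 4
//
// an external description would already report x at 4, vbptr included.
// End note.]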
+ bool FoundBase = false; + if (UseExternalLayout) { + FoundBase = External.getExternalVBaseOffset(BaseDecl, BaseOffset); + if (FoundBase) + assert(BaseOffset >= Size && "base offset already allocated"); + } + if (!FoundBase) + BaseOffset = Size.RoundUpToAlignment(Info.Alignment); + VBases.insert(std::make_pair(BaseDecl, ASTRecordLayout::VBaseInfo(BaseOffset, HasVtordisp))); Size = BaseOffset + BaseLayout.getNonVirtualSize(); @@ -2677,6 +2742,12 @@ void MicrosoftRecordLayoutBuilder::finalizeLayout(const RecordDecl *RD) { else Size = MinEmptyStructSize; } + + if (UseExternalLayout) { + Size = Context.toCharUnitsFromBits(External.Size); + if (External.Align) + Alignment = Context.toCharUnitsFromBits(External.Align); + } } // Recursively walks the non-virtual bases of a class and determines if any of @@ -2815,7 +2886,7 @@ ASTContext::getASTRecordLayout(const RecordDecl *D) const { const ASTRecordLayout *NewEntry = nullptr; - if (isMsLayout(D) && !D->getASTContext().getExternalSource()) { + if (isMsLayout(D)) { NewEntry = BuildMicrosoftASTRecordLayout(D); } else if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) { EmptySubobjectMap EmptySubobjects(*this, RD); @@ -2905,11 +2976,11 @@ void ASTContext::setNonKeyFunction(const CXXMethodDecl *Method) { // Look up the cache entry. Since we're working with the first // declaration, its parent must be the class definition, which is // the correct key for the KeyFunctions hash. - llvm::DenseMap<const CXXRecordDecl*, LazyDeclPtr>::iterator - I = KeyFunctions.find(Method->getParent()); + const auto &Map = KeyFunctions; + auto I = Map.find(Method->getParent()); // If it's not cached, there's nothing to do. - if (I == KeyFunctions.end()) return; + if (I == Map.end()) return; // If it is cached, check whether it's the target method, and if so, // remove it from the cache. 
Note, the call to 'get' might invalidate diff --git a/contrib/llvm/tools/clang/lib/AST/Stmt.cpp b/contrib/llvm/tools/clang/lib/AST/Stmt.cpp index 68c7e72..6baa99b 100644 --- a/contrib/llvm/tools/clang/lib/AST/Stmt.cpp +++ b/contrib/llvm/tools/clang/lib/AST/Stmt.cpp @@ -95,10 +95,16 @@ void Stmt::EnableStatistics() { Stmt *Stmt::IgnoreImplicit() { Stmt *s = this; - if (ExprWithCleanups *ewc = dyn_cast<ExprWithCleanups>(s)) + if (auto *ewc = dyn_cast<ExprWithCleanups>(s)) s = ewc->getSubExpr(); - while (ImplicitCastExpr *ice = dyn_cast<ImplicitCastExpr>(s)) + if (auto *mte = dyn_cast<MaterializeTemporaryExpr>(s)) + s = mte->GetTemporaryExpr(); + + if (auto *bte = dyn_cast<CXXBindTemporaryExpr>(s)) + s = bte->getSubExpr(); + + while (auto *ice = dyn_cast<ImplicitCastExpr>(s)) s = ice->getSubExpr(); return s; @@ -934,8 +940,7 @@ void ForStmt::setConditionVariable(const ASTContext &C, VarDecl *V) { } SwitchStmt::SwitchStmt(const ASTContext &C, VarDecl *Var, Expr *cond) - : Stmt(SwitchStmtClass), FirstCase(nullptr), AllEnumCasesCovered(0) -{ + : Stmt(SwitchStmtClass), FirstCase(nullptr, false) { setConditionVariable(C, Var); SubExprs[COND] = cond; SubExprs[BODY] = nullptr; @@ -1241,17 +1246,47 @@ OMPFirstprivateClause *OMPFirstprivateClause::CreateEmpty(const ASTContext &C, return new (Mem) OMPFirstprivateClause(N); } -OMPLastprivateClause *OMPLastprivateClause::Create(const ASTContext &C, - SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc, - ArrayRef<Expr *> VL) { +void OMPLastprivateClause::setPrivateCopies(ArrayRef<Expr *> PrivateCopies) { + assert(PrivateCopies.size() == varlist_size() && + "Number of private copies is not the same as the preallocated buffer"); + std::copy(PrivateCopies.begin(), PrivateCopies.end(), varlist_end()); +} + +void OMPLastprivateClause::setSourceExprs(ArrayRef<Expr *> SrcExprs) { + assert(SrcExprs.size() == varlist_size() && "Number of source expressions is " + "not the same as the " + "preallocated buffer"); + std::copy(SrcExprs.begin(), SrcExprs.end(), getPrivateCopies().end()); +} + +void OMPLastprivateClause::setDestinationExprs(ArrayRef<Expr *> DstExprs) { + assert(DstExprs.size() == varlist_size() && "Number of destination " + "expressions is not the same as " + "the preallocated buffer"); + std::copy(DstExprs.begin(), DstExprs.end(), getSourceExprs().end()); +} + +void OMPLastprivateClause::setAssignmentOps(ArrayRef<Expr *> AssignmentOps) { + assert(AssignmentOps.size() == varlist_size() && + "Number of assignment expressions is not the same as the preallocated " + "buffer"); + std::copy(AssignmentOps.begin(), AssignmentOps.end(), + getDestinationExprs().end()); +} + +OMPLastprivateClause *OMPLastprivateClause::Create( + const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, + SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs, + ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps) { void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPLastprivateClause), llvm::alignOf<Expr *>()) + - sizeof(Expr *) * VL.size()); + 5 * sizeof(Expr *) * VL.size()); OMPLastprivateClause *Clause = new (Mem) OMPLastprivateClause(StartLoc, LParenLoc, EndLoc, VL.size()); Clause->setVarRefs(VL); + Clause->setSourceExprs(SrcExprs); + Clause->setDestinationExprs(DstExprs); + Clause->setAssignmentOps(AssignmentOps); return Clause; } @@ -1259,7 +1294,7 @@ OMPLastprivateClause *OMPLastprivateClause::CreateEmpty(const ASTContext &C, unsigned N) { void *Mem = 
C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPLastprivateClause), llvm::alignOf<Expr *>()) + - sizeof(Expr *) * N); + 5 * sizeof(Expr *) * N); return new (Mem) OMPLastprivateClause(N); } @@ -1285,27 +1320,56 @@ OMPSharedClause *OMPSharedClause::CreateEmpty(const ASTContext &C, return new (Mem) OMPSharedClause(N); } -OMPLinearClause *OMPLinearClause::Create(const ASTContext &C, - SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation ColonLoc, - SourceLocation EndLoc, - ArrayRef<Expr *> VL, Expr *Step) { +void OMPLinearClause::setInits(ArrayRef<Expr *> IL) { + assert(IL.size() == varlist_size() && + "Number of inits is not the same as the preallocated buffer"); + std::copy(IL.begin(), IL.end(), varlist_end()); +} + +void OMPLinearClause::setUpdates(ArrayRef<Expr *> UL) { + assert(UL.size() == varlist_size() && + "Number of updates is not the same as the preallocated buffer"); + std::copy(UL.begin(), UL.end(), getInits().end()); +} + +void OMPLinearClause::setFinals(ArrayRef<Expr *> FL) { + assert(FL.size() == varlist_size() && + "Number of final updates is not the same as the preallocated buffer"); + std::copy(FL.begin(), FL.end(), getUpdates().end()); +} + +OMPLinearClause * +OMPLinearClause::Create(const ASTContext &C, SourceLocation StartLoc, + SourceLocation LParenLoc, SourceLocation ColonLoc, + SourceLocation EndLoc, ArrayRef<Expr *> VL, + ArrayRef<Expr *> IL, Expr *Step, Expr *CalcStep) { + // Allocate space for 4 lists (Vars, Inits, Updates, Finals) and 2 expressions + // (Step and CalcStep). void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPLinearClause), llvm::alignOf<Expr *>()) + - sizeof(Expr *) * (VL.size() + 1)); + (4 * VL.size() + 2) * sizeof(Expr *)); OMPLinearClause *Clause = new (Mem) OMPLinearClause(StartLoc, LParenLoc, ColonLoc, EndLoc, VL.size()); Clause->setVarRefs(VL); + Clause->setInits(IL); + // Fill update and final expressions with zeroes, they are provided later, + // after the directive construction. + std::fill(Clause->getInits().end(), Clause->getInits().end() + VL.size(), + nullptr); + std::fill(Clause->getUpdates().end(), Clause->getUpdates().end() + VL.size(), + nullptr); Clause->setStep(Step); + Clause->setCalcStep(CalcStep); return Clause; } OMPLinearClause *OMPLinearClause::CreateEmpty(const ASTContext &C, unsigned NumVars) { + // Allocate space for 4 lists (Vars, Inits, Updates, Finals) and 2 expressions + // (Step and CalcStep). 
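// [Editor's note -- annotation, not part of the patch. The (4 * N + 2) *
// sizeof(Expr *) figure reflects how OMPLinearClause stores its payload
// directly after the clause object:
//
//   [OMPLinearClause][Vars x N][Inits x N][Updates x N][Finals x N][Step][CalcStep]
//
// which is also why each setter copies to the end() of the previous list
// (setUpdates() writes at getInits().end(), and so on). For N == 3 variables
// that is 14 trailing Expr* slots. End note.]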
void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPLinearClause), llvm::alignOf<Expr *>()) + - sizeof(Expr *) * (NumVars + 1)); + (4 * NumVars + 2) * sizeof(Expr *)); return new (Mem) OMPLinearClause(NumVars); } @@ -1331,17 +1395,41 @@ OMPAlignedClause *OMPAlignedClause::CreateEmpty(const ASTContext &C, return new (Mem) OMPAlignedClause(NumVars); } -OMPCopyinClause *OMPCopyinClause::Create(const ASTContext &C, - SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc, - ArrayRef<Expr *> VL) { +void OMPCopyinClause::setSourceExprs(ArrayRef<Expr *> SrcExprs) { + assert(SrcExprs.size() == varlist_size() && "Number of source expressions is " + "not the same as the " + "preallocated buffer"); + std::copy(SrcExprs.begin(), SrcExprs.end(), varlist_end()); +} + +void OMPCopyinClause::setDestinationExprs(ArrayRef<Expr *> DstExprs) { + assert(DstExprs.size() == varlist_size() && "Number of destination " + "expressions is not the same as " + "the preallocated buffer"); + std::copy(DstExprs.begin(), DstExprs.end(), getSourceExprs().end()); +} + +void OMPCopyinClause::setAssignmentOps(ArrayRef<Expr *> AssignmentOps) { + assert(AssignmentOps.size() == varlist_size() && + "Number of assignment expressions is not the same as the preallocated " + "buffer"); + std::copy(AssignmentOps.begin(), AssignmentOps.end(), + getDestinationExprs().end()); +} + +OMPCopyinClause *OMPCopyinClause::Create( + const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, + SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs, + ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps) { void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPCopyinClause), llvm::alignOf<Expr *>()) + - sizeof(Expr *) * VL.size()); + 4 * sizeof(Expr *) * VL.size()); OMPCopyinClause *Clause = new (Mem) OMPCopyinClause(StartLoc, LParenLoc, EndLoc, VL.size()); Clause->setVarRefs(VL); + Clause->setSourceExprs(SrcExprs); + Clause->setDestinationExprs(DstExprs); + Clause->setAssignmentOps(AssignmentOps); return Clause; } @@ -1349,21 +1437,45 @@ OMPCopyinClause *OMPCopyinClause::CreateEmpty(const ASTContext &C, unsigned N) { void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPCopyinClause), llvm::alignOf<Expr *>()) + - sizeof(Expr *) * N); + 4 * sizeof(Expr *) * N); return new (Mem) OMPCopyinClause(N); } -OMPCopyprivateClause *OMPCopyprivateClause::Create(const ASTContext &C, - SourceLocation StartLoc, - SourceLocation LParenLoc, - SourceLocation EndLoc, - ArrayRef<Expr *> VL) { +void OMPCopyprivateClause::setSourceExprs(ArrayRef<Expr *> SrcExprs) { + assert(SrcExprs.size() == varlist_size() && "Number of source expressions is " + "not the same as the " + "preallocated buffer"); + std::copy(SrcExprs.begin(), SrcExprs.end(), varlist_end()); +} + +void OMPCopyprivateClause::setDestinationExprs(ArrayRef<Expr *> DstExprs) { + assert(DstExprs.size() == varlist_size() && "Number of destination " + "expressions is not the same as " + "the preallocated buffer"); + std::copy(DstExprs.begin(), DstExprs.end(), getSourceExprs().end()); +} + +void OMPCopyprivateClause::setAssignmentOps(ArrayRef<Expr *> AssignmentOps) { + assert(AssignmentOps.size() == varlist_size() && + "Number of assignment expressions is not the same as the preallocated " + "buffer"); + std::copy(AssignmentOps.begin(), AssignmentOps.end(), + getDestinationExprs().end()); +} + +OMPCopyprivateClause *OMPCopyprivateClause::Create( + const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, + SourceLocation EndLoc, 
ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs, + ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps) { void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPCopyprivateClause), llvm::alignOf<Expr *>()) + - sizeof(Expr *) * VL.size()); + 4 * sizeof(Expr *) * VL.size()); OMPCopyprivateClause *Clause = new (Mem) OMPCopyprivateClause(StartLoc, LParenLoc, EndLoc, VL.size()); Clause->setVarRefs(VL); + Clause->setSourceExprs(SrcExprs); + Clause->setDestinationExprs(DstExprs); + Clause->setAssignmentOps(AssignmentOps); return Clause; } @@ -1371,7 +1483,7 @@ OMPCopyprivateClause *OMPCopyprivateClause::CreateEmpty(const ASTContext &C, unsigned N) { void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPCopyprivateClause), llvm::alignOf<Expr *>()) + - sizeof(Expr *) * N); + 4 * sizeof(Expr *) * N); return new (Mem) OMPCopyprivateClause(N); } @@ -1399,16 +1511,42 @@ void OMPLoopDirective::setFinals(ArrayRef<Expr *> A) { std::copy(A.begin(), A.end(), getFinals().begin()); } +void OMPReductionClause::setLHSExprs(ArrayRef<Expr *> LHSExprs) { + assert( + LHSExprs.size() == varlist_size() && + "Number of LHS expressions is not the same as the preallocated buffer"); + std::copy(LHSExprs.begin(), LHSExprs.end(), varlist_end()); +} + +void OMPReductionClause::setRHSExprs(ArrayRef<Expr *> RHSExprs) { + assert( + RHSExprs.size() == varlist_size() && + "Number of RHS expressions is not the same as the preallocated buffer"); + std::copy(RHSExprs.begin(), RHSExprs.end(), getLHSExprs().end()); +} + +void OMPReductionClause::setReductionOps(ArrayRef<Expr *> ReductionOps) { + assert(ReductionOps.size() == varlist_size() && "Number of reduction " + "expressions is not the same " + "as the preallocated buffer"); + std::copy(ReductionOps.begin(), ReductionOps.end(), getRHSExprs().end()); +} + OMPReductionClause *OMPReductionClause::Create( const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VL, - NestedNameSpecifierLoc QualifierLoc, const DeclarationNameInfo &NameInfo) { + NestedNameSpecifierLoc QualifierLoc, const DeclarationNameInfo &NameInfo, + ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs, + ArrayRef<Expr *> ReductionOps) { void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPReductionClause), llvm::alignOf<Expr *>()) + - sizeof(Expr *) * VL.size()); + 4 * sizeof(Expr *) * VL.size()); OMPReductionClause *Clause = new (Mem) OMPReductionClause( StartLoc, LParenLoc, EndLoc, ColonLoc, VL.size(), QualifierLoc, NameInfo); Clause->setVarRefs(VL); + Clause->setLHSExprs(LHSExprs); + Clause->setRHSExprs(RHSExprs); + Clause->setReductionOps(ReductionOps); return Clause; } @@ -1416,7 +1554,7 @@ OMPReductionClause *OMPReductionClause::CreateEmpty(const ASTContext &C, unsigned N) { void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPReductionClause), llvm::alignOf<Expr *>()) + - sizeof(Expr *) * N); + 4 * sizeof(Expr *) * N); return new (Mem) OMPReductionClause(N); } @@ -1443,10 +1581,7 @@ OMPFlushClause *OMPFlushClause::CreateEmpty(const ASTContext &C, unsigned N) { const OMPClause * OMPExecutableDirective::getSingleClause(OpenMPClauseKind K) const { - auto ClauseFilter = - [=](const OMPClause *C) -> bool { return C->getClauseKind() == K; }; - OMPExecutableDirective::filtered_clause_iterator<decltype(ClauseFilter)> I( - clauses(), ClauseFilter); + auto &&I = getClausesOfKind(K); if (I) { auto *Clause = *I; @@ -1948,14 +2083,14 @@ OMPOrderedDirective *OMPOrderedDirective::CreateEmpty(const ASTContext &C, return 
new (Mem) OMPOrderedDirective(); } -OMPAtomicDirective * -OMPAtomicDirective::Create(const ASTContext &C, SourceLocation StartLoc, - SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, - Stmt *AssociatedStmt, Expr *X, Expr *V, Expr *E) { +OMPAtomicDirective *OMPAtomicDirective::Create( + const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, + ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *X, Expr *V, + Expr *E, Expr *UE, bool IsXLHSInRHSPart, bool IsPostfixUpdate) { unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPAtomicDirective), llvm::alignOf<OMPClause *>()); void *Mem = C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() + - 4 * sizeof(Stmt *)); + 5 * sizeof(Stmt *)); OMPAtomicDirective *Dir = new (Mem) OMPAtomicDirective(StartLoc, EndLoc, Clauses.size()); Dir->setClauses(Clauses); @@ -1963,6 +2098,9 @@ OMPAtomicDirective::Create(const ASTContext &C, SourceLocation StartLoc, Dir->setX(X); Dir->setV(V); Dir->setExpr(E); + Dir->setUpdateExpr(UE); + Dir->IsXLHSInRHSPart = IsXLHSInRHSPart; + Dir->IsPostfixUpdate = IsPostfixUpdate; return Dir; } @@ -1972,7 +2110,7 @@ OMPAtomicDirective *OMPAtomicDirective::CreateEmpty(const ASTContext &C, unsigned Size = llvm::RoundUpToAlignment(sizeof(OMPAtomicDirective), llvm::alignOf<OMPClause *>()); void *Mem = - C.Allocate(Size + sizeof(OMPClause *) * NumClauses + 4 * sizeof(Stmt *)); + C.Allocate(Size + sizeof(OMPClause *) * NumClauses + 5 * sizeof(Stmt *)); return new (Mem) OMPAtomicDirective(NumClauses); } diff --git a/contrib/llvm/tools/clang/lib/AST/StmtPrinter.cpp b/contrib/llvm/tools/clang/lib/AST/StmtPrinter.cpp index 927a679..dc4f996 100644 --- a/contrib/llvm/tools/clang/lib/AST/StmtPrinter.cpp +++ b/contrib/llvm/tools/clang/lib/AST/StmtPrinter.cpp @@ -1069,7 +1069,8 @@ void StmtPrinter::VisitIntegerLiteral(IntegerLiteral *Node) { // Emit suffixes. Integer literals are always a builtin integer type. 
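// [Editor's note -- annotation, not part of the patch. The suffixes emitted
// below are the MS-style width-specific forms, so the printed literal
// round-trips with its exact type. Illustrative output for the cases visible
// in this hunk:
//
//   (short)1          -> 1i16
//   (unsigned short)1 -> 1Ui16
//   (char)65          -> 65i8   // Char_S/Char_U now handled instead of SChar
//
// End note.]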
switch (Node->getType()->getAs<BuiltinType>()->getKind()) { default: llvm_unreachable("Unexpected type for integer literal!"); - case BuiltinType::SChar: OS << "i8"; break; + case BuiltinType::Char_S: + case BuiltinType::Char_U: OS << "i8"; break; case BuiltinType::UChar: OS << "Ui8"; break; case BuiltinType::Short: OS << "i16"; break; case BuiltinType::UShort: OS << "Ui16"; break; @@ -1373,24 +1374,24 @@ void StmtPrinter::VisitInitListExpr(InitListExpr* Node) { return; } - OS << "{ "; + OS << "{"; for (unsigned i = 0, e = Node->getNumInits(); i != e; ++i) { if (i) OS << ", "; if (Node->getInit(i)) PrintExpr(Node->getInit(i)); else - OS << "0"; + OS << "{}"; } - OS << " }"; + OS << "}"; } void StmtPrinter::VisitParenListExpr(ParenListExpr* Node) { - OS << "( "; + OS << "("; for (unsigned i = 0, e = Node->getNumExprs(); i != e; ++i) { if (i) OS << ", "; PrintExpr(Node->getExpr(i)); } - OS << " )"; + OS << ")"; } void StmtPrinter::VisitDesignatedInitExpr(DesignatedInitExpr *Node) { @@ -1621,6 +1622,15 @@ void StmtPrinter::VisitUserDefinedLiteral(UserDefinedLiteral *Node) { const TemplateArgumentList *Args = cast<FunctionDecl>(DRE->getDecl())->getTemplateSpecializationArgs(); assert(Args); + + if (Args->size() != 1) { + OS << "operator \"\" " << Node->getUDSuffix()->getName(); + TemplateSpecializationType::PrintTemplateArgumentList( + OS, Args->data(), Args->size(), Policy); + OS << "()"; + return; + } + const TemplateArgument &Pack = Args->get(0); for (const auto &P : Pack.pack_elements()) { char C = (char)P.getAsIntegral().getZExtValue(); @@ -1679,9 +1689,13 @@ void StmtPrinter::VisitCXXDefaultInitExpr(CXXDefaultInitExpr *Node) { void StmtPrinter::VisitCXXFunctionalCastExpr(CXXFunctionalCastExpr *Node) { Node->getType().print(OS, Policy); - OS << "("; + // If there are no parens, this is list-initialization, and the braces are + // part of the syntax of the inner construct. + if (Node->getLParenLoc().isValid()) + OS << "("; PrintExpr(Node->getSubExpr()); - OS << ")"; + if (Node->getLParenLoc().isValid()) + OS << ")"; } void StmtPrinter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *Node) { @@ -1690,7 +1704,12 @@ void StmtPrinter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *Node) { void StmtPrinter::VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *Node) { Node->getType().print(OS, Policy); - OS << "("; + if (Node->isStdInitListInitialization()) + /* Nothing to do; braces are part of creating the std::initializer_list. */; + else if (Node->isListInitialization()) + OS << "{"; + else + OS << "("; for (CXXTemporaryObjectExpr::arg_iterator Arg = Node->arg_begin(), ArgEnd = Node->arg_end(); Arg != ArgEnd; ++Arg) { @@ -1700,7 +1719,12 @@ void StmtPrinter::VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *Node) { OS << ", "; PrintExpr(*Arg); } - OS << ")"; + if (Node->isStdInitListInitialization()) + /* See above. 
*/; + else if (Node->isListInitialization()) + OS << "}"; + else + OS << ")"; } void StmtPrinter::VisitLambdaExpr(LambdaExpr *Node) { @@ -1734,7 +1758,7 @@ void StmtPrinter::VisitLambdaExpr(LambdaExpr *Node) { break; case LCK_ByRef: - if (Node->getCaptureDefault() != LCD_ByRef || C->isInitCapture()) + if (Node->getCaptureDefault() != LCD_ByRef || Node->isInitCapture(C)) OS << '&'; OS << C->getCapturedVar()->getName(); break; @@ -1746,7 +1770,7 @@ void StmtPrinter::VisitLambdaExpr(LambdaExpr *Node) { llvm_unreachable("VLA type in explicit captures."); } - if (C->isInitCapture()) + if (Node->isInitCapture(C)) PrintExpr(C->getCapturedVar()->getInit()); } OS << ']'; @@ -1866,8 +1890,8 @@ void StmtPrinter::VisitCXXPseudoDestructorExpr(CXXPseudoDestructorExpr *E) { } void StmtPrinter::VisitCXXConstructExpr(CXXConstructExpr *E) { - if (E->isListInitialization()) - OS << "{ "; + if (E->isListInitialization() && !E->isStdInitListInitialization()) + OS << "{"; for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) { if (isa<CXXDefaultArgExpr>(E->getArg(i))) { @@ -1879,8 +1903,8 @@ void StmtPrinter::VisitCXXConstructExpr(CXXConstructExpr *E) { PrintExpr(E->getArg(i)); } - if (E->isListInitialization()) - OS << " }"; + if (E->isListInitialization() && !E->isStdInitListInitialization()) + OS << "}"; } void StmtPrinter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) { diff --git a/contrib/llvm/tools/clang/lib/AST/StmtProfile.cpp b/contrib/llvm/tools/clang/lib/AST/StmtProfile.cpp index d1f25d6..c66b153 100644 --- a/contrib/llvm/tools/clang/lib/AST/StmtProfile.cpp +++ b/contrib/llvm/tools/clang/lib/AST/StmtProfile.cpp @@ -298,8 +298,12 @@ void OMPClauseProfiler::VisitOMPDefaultClause(const OMPDefaultClause *C) { } void OMPClauseProfiler::VisitOMPProcBindClause(const OMPProcBindClause *C) { } void OMPClauseProfiler::VisitOMPScheduleClause(const OMPScheduleClause *C) { - if (C->getChunkSize()) + if (C->getChunkSize()) { Profiler->VisitStmt(C->getChunkSize()); + if (C->getHelperChunkSize()) { + Profiler->VisitStmt(C->getHelperChunkSize()); + } + } } void OMPClauseProfiler::VisitOMPOrderedClause(const OMPOrderedClause *) {} @@ -346,6 +350,15 @@ OMPClauseProfiler::VisitOMPFirstprivateClause(const OMPFirstprivateClause *C) { void OMPClauseProfiler::VisitOMPLastprivateClause(const OMPLastprivateClause *C) { VisitOMPClauseList(C); + for (auto *E : C->source_exprs()) { + Profiler->VisitStmt(E); + } + for (auto *E : C->destination_exprs()) { + Profiler->VisitStmt(E); + } + for (auto *E : C->assignment_ops()) { + Profiler->VisitStmt(E); + } } void OMPClauseProfiler::VisitOMPSharedClause(const OMPSharedClause *C) { VisitOMPClauseList(C); @@ -356,10 +369,29 @@ void OMPClauseProfiler::VisitOMPReductionClause( C->getQualifierLoc().getNestedNameSpecifier()); Profiler->VisitName(C->getNameInfo().getName()); VisitOMPClauseList(C); + for (auto *E : C->lhs_exprs()) { + Profiler->VisitStmt(E); + } + for (auto *E : C->rhs_exprs()) { + Profiler->VisitStmt(E); + } + for (auto *E : C->reduction_ops()) { + Profiler->VisitStmt(E); + } } void OMPClauseProfiler::VisitOMPLinearClause(const OMPLinearClause *C) { VisitOMPClauseList(C); + for (auto *E : C->inits()) { + Profiler->VisitStmt(E); + } + for (auto *E : C->updates()) { + Profiler->VisitStmt(E); + } + for (auto *E : C->finals()) { + Profiler->VisitStmt(E); + } Profiler->VisitStmt(C->getStep()); + Profiler->VisitStmt(C->getCalcStep()); } void OMPClauseProfiler::VisitOMPAlignedClause(const OMPAlignedClause *C) { VisitOMPClauseList(C); @@ -367,10 +399,28 @@ void
OMPClauseProfiler::VisitOMPAlignedClause(const OMPAlignedClause *C) { } void OMPClauseProfiler::VisitOMPCopyinClause(const OMPCopyinClause *C) { VisitOMPClauseList(C); + for (auto *E : C->source_exprs()) { + Profiler->VisitStmt(E); + } + for (auto *E : C->destination_exprs()) { + Profiler->VisitStmt(E); + } + for (auto *E : C->assignment_ops()) { + Profiler->VisitStmt(E); + } } void OMPClauseProfiler::VisitOMPCopyprivateClause(const OMPCopyprivateClause *C) { VisitOMPClauseList(C); + for (auto *E : C->source_exprs()) { + Profiler->VisitStmt(E); + } + for (auto *E : C->destination_exprs()) { + Profiler->VisitStmt(E); + } + for (auto *E : C->assignment_ops()) { + Profiler->VisitStmt(E); + } } void OMPClauseProfiler::VisitOMPFlushClause(const OMPFlushClause *C) { VisitOMPClauseList(C); diff --git a/contrib/llvm/tools/clang/lib/AST/Type.cpp b/contrib/llvm/tools/clang/lib/AST/Type.cpp index e4f364d..09bb769 100644 --- a/contrib/llvm/tools/clang/lib/AST/Type.cpp +++ b/contrib/llvm/tools/clang/lib/AST/Type.cpp @@ -170,15 +170,7 @@ DependentSizedExtVectorType::Profile(llvm::FoldingSetNodeID &ID, VectorType::VectorType(QualType vecType, unsigned nElements, QualType canonType, VectorKind vecKind) - : Type(Vector, canonType, vecType->isDependentType(), - vecType->isInstantiationDependentType(), - vecType->isVariablyModifiedType(), - vecType->containsUnexpandedParameterPack()), - ElementType(vecType) -{ - VectorTypeBits.VecKind = vecKind; - VectorTypeBits.NumElements = nElements; -} + : VectorType(Vector, vecType, nElements, canonType, vecKind) {} VectorType::VectorType(TypeClass tc, QualType vecType, unsigned nElements, QualType canonType, VectorKind vecKind) @@ -640,12 +632,13 @@ bool Type::hasIntegerRepresentation() const { bool Type::isIntegralType(ASTContext &Ctx) const { if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType)) return BT->getKind() >= BuiltinType::Bool && - BT->getKind() <= BuiltinType::Int128; - + BT->getKind() <= BuiltinType::Int128; + + // Complete enum types are integral in C. if (!Ctx.getLangOpts().CPlusPlus) if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType)) - return ET->getDecl()->isComplete(); // Complete enum types are integral in C. 
- + return ET->getDecl()->isComplete(); + return false; } @@ -736,7 +729,7 @@ bool Type::isSignedIntegerType() const { bool Type::isSignedIntegerOrEnumerationType() const { if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType)) { return BT->getKind() >= BuiltinType::Char_S && - BT->getKind() <= BuiltinType::Int128; + BT->getKind() <= BuiltinType::Int128; } if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType)) { @@ -1089,7 +1082,7 @@ bool QualType::isTrivialType(ASTContext &Context) const { bool QualType::isTriviallyCopyableType(ASTContext &Context) const { if ((*this)->isArrayType()) - return Context.getBaseElementType(*this).isTrivialType(Context); + return Context.getBaseElementType(*this).isTriviallyCopyableType(Context); if (Context.getLangOpts().ObjCAutoRefCount) { switch (getObjCLifetime()) { @@ -1586,8 +1579,9 @@ StringRef FunctionType::getNameForCallConv(CallingConv CC) { case CC_X86_64SysV: return "sysv_abi"; case CC_AAPCS: return "aapcs"; case CC_AAPCS_VFP: return "aapcs-vfp"; - case CC_PnaclCall: return "pnaclcall"; case CC_IntelOclBicc: return "intel_ocl_bicc"; + case CC_SpirFunction: return "spir_function"; + case CC_SpirKernel: return "spir_kernel"; } llvm_unreachable("Invalid calling convention."); @@ -1720,7 +1714,7 @@ bool FunctionProtoType::isNothrow(const ASTContext &Ctx, if (EST == EST_DynamicNone || EST == EST_BasicNoexcept) return true; - if (EST == EST_Dynamic && ResultIfDependent == true) { + if (EST == EST_Dynamic && ResultIfDependent) { // A dynamic exception specification is throwing unless every exception // type is an (unexpanded) pack expansion type. for (unsigned I = 0, N = NumExceptions; I != N; ++I) @@ -1936,7 +1930,6 @@ bool AttributedType::isCallingConv() const { case attr_pascal: case attr_ms_abi: case attr_sysv_abi: - case attr_pnaclcall: case attr_inteloclbicc: return true; } @@ -2378,6 +2371,11 @@ bool Type::isObjCNSObjectType() const { return typedefType->getDecl()->hasAttr<ObjCNSObjectAttr>(); return false; } +bool Type::isObjCIndependentClassType() const { + if (const TypedefType *typedefType = dyn_cast<TypedefType>(this)) + return typedefType->getDecl()->hasAttr<ObjCIndependentClassAttr>(); + return false; +} bool Type::isObjCRetainableType() const { return isObjCObjectPointerType() || isBlockPointerType() || diff --git a/contrib/llvm/tools/clang/lib/AST/TypePrinter.cpp b/contrib/llvm/tools/clang/lib/AST/TypePrinter.cpp index e36fc17..3928fe8 100644 --- a/contrib/llvm/tools/clang/lib/AST/TypePrinter.cpp +++ b/contrib/llvm/tools/clang/lib/AST/TypePrinter.cpp @@ -110,7 +110,7 @@ namespace { }; } -static void AppendTypeQualList(raw_ostream &OS, unsigned TypeQuals) { +static void AppendTypeQualList(raw_ostream &OS, unsigned TypeQuals, bool C99) { bool appendSpace = false; if (TypeQuals & Qualifiers::Const) { OS << "const"; @@ -123,7 +123,11 @@ static void AppendTypeQualList(raw_ostream &OS, unsigned TypeQuals) { } if (TypeQuals & Qualifiers::Restrict) { if (appendSpace) OS << ' '; - OS << "restrict"; + if (C99) { + OS << "restrict"; + } else { + OS << "__restrict"; + } } } @@ -432,7 +436,7 @@ void TypePrinter::printConstantArrayAfter(const ConstantArrayType *T, raw_ostream &OS) { OS << '['; if (T->getIndexTypeQualifiers().hasQualifiers()) { - AppendTypeQualList(OS, T->getIndexTypeCVRQualifiers()); + AppendTypeQualList(OS, T->getIndexTypeCVRQualifiers(), Policy.LangOpts.C99); OS << ' '; } @@ -465,7 +469,7 @@ void TypePrinter::printVariableArrayAfter(const VariableArrayType *T, raw_ostream &OS) { OS << '['; if 
(T->getIndexTypeQualifiers().hasQualifiers()) { - AppendTypeQualList(OS, T->getIndexTypeCVRQualifiers()); + AppendTypeQualList(OS, T->getIndexTypeCVRQualifiers(), Policy.LangOpts.C99); OS << ' '; } @@ -685,9 +689,6 @@ void TypePrinter::printFunctionProtoAfter(const FunctionProtoType *T, case CC_AAPCS_VFP: OS << " __attribute__((pcs(\"aapcs-vfp\")))"; break; - case CC_PnaclCall: - OS << " __attribute__((pnaclcall))"; - break; case CC_IntelOclBicc: OS << " __attribute__((intel_ocl_bicc))"; break; @@ -697,6 +698,10 @@ void TypePrinter::printFunctionProtoAfter(const FunctionProtoType *T, case CC_X86_64SysV: OS << " __attribute__((sysv_abi))"; break; + case CC_SpirFunction: + case CC_SpirKernel: + // Do nothing. These CCs are not available as attributes. + break; } } @@ -708,7 +713,7 @@ void TypePrinter::printFunctionProtoAfter(const FunctionProtoType *T, if (unsigned quals = T->getTypeQuals()) { OS << ' '; - AppendTypeQualList(OS, quals); + AppendTypeQualList(OS, quals, Policy.LangOpts.C99); } switch (T->getRefQualifier()) { @@ -1253,7 +1258,6 @@ void TypePrinter::printAttributedAfter(const AttributedType *T, OS << ')'; break; } - case AttributedType::attr_pnaclcall: OS << "pnaclcall"; break; case AttributedType::attr_inteloclbicc: OS << "inteloclbicc"; break; } OS << "))"; @@ -1473,7 +1477,7 @@ void Qualifiers::print(raw_ostream &OS, const PrintingPolicy& Policy, unsigned quals = getCVRQualifiers(); if (quals) { - AppendTypeQualList(OS, quals); + AppendTypeQualList(OS, quals, Policy.LangOpts.C99); addSpace = true; } if (unsigned addrspace = getAddressSpace()) { diff --git a/contrib/llvm/tools/clang/lib/AST/VTableBuilder.cpp b/contrib/llvm/tools/clang/lib/AST/VTableBuilder.cpp index ddb1f05..ca5f0aa 100644 --- a/contrib/llvm/tools/clang/lib/AST/VTableBuilder.cpp +++ b/contrib/llvm/tools/clang/lib/AST/VTableBuilder.cpp @@ -13,9 +13,11 @@ #include "clang/AST/VTableBuilder.h" #include "clang/AST/ASTContext.h" +#include "clang/AST/ASTDiagnostic.h" #include "clang/AST/CXXInheritance.h" #include "clang/AST/RecordLayout.h" #include "clang/Basic/TargetInfo.h" +#include "llvm/ADT/SetOperations.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/Support/Format.h" #include "llvm/Support/raw_ostream.h" @@ -216,7 +218,7 @@ FinalOverriders::FinalOverriders(const CXXRecordDecl *MostDerivedClass, #endif } -static BaseOffset ComputeBaseOffset(ASTContext &Context, +static BaseOffset ComputeBaseOffset(const ASTContext &Context, const CXXRecordDecl *DerivedRD, const CXXBasePath &Path) { CharUnits NonVirtualOffset = CharUnits::Zero(); @@ -255,7 +257,7 @@ static BaseOffset ComputeBaseOffset(ASTContext &Context, } -static BaseOffset ComputeBaseOffset(ASTContext &Context, +static BaseOffset ComputeBaseOffset(const ASTContext &Context, const CXXRecordDecl *BaseRD, const CXXRecordDecl *DerivedRD) { CXXBasePaths Paths(/*FindAmbiguities=*/false, @@ -411,7 +413,8 @@ void FinalOverriders::dump(raw_ostream &Out, BaseSubobject Base, for (const auto *MD : RD->methods()) { if (!MD->isVirtual()) continue; - + MD = MD->getCanonicalDecl(); + OverriderInfo Overrider = getOverrider(MD, Base.getBaseOffset()); Out << " "; @@ -695,6 +698,7 @@ void VCallAndVBaseOffsetBuilder::AddVCallOffsets(BaseSubobject Base, for (const auto *MD : RD->methods()) { if (!MD->isVirtual()) continue; + MD = MD->getCanonicalDecl(); CharUnits OffsetOffset = getCurrentOffsetOffset(); @@ -1514,6 +1518,7 @@ void ItaniumVTableBuilder::AddMethods( for (const auto *MD : RD->methods()) { if (!MD->isVirtual()) continue; + MD = MD->getCanonicalDecl(); // Get the 
final overrider. FinalOverriders::OverriderInfo Overrider = @@ -2196,6 +2201,7 @@ void ItaniumVTableBuilder::dumpLayout(raw_ostream &Out) { // We only want virtual member functions. if (!MD->isVirtual()) continue; + MD = MD->getCanonicalDecl(); std::string MethodName = PredefinedExpr::ComputeName(PredefinedExpr::PrettyFunctionNoVirtual, @@ -2585,7 +2591,9 @@ public: // Only include the RTTI component if we know that we will provide a // definition of the vftable. HasRTTIComponent = Context.getLangOpts().RTTIData && - !MostDerivedClass->hasAttr<DLLImportAttr>(); + !MostDerivedClass->hasAttr<DLLImportAttr>() && + MostDerivedClass->getTemplateSpecializationKind() != + TSK_ExplicitInstantiationDeclaration; LayoutVFTable(); @@ -2625,8 +2633,6 @@ public: void dumpLayout(raw_ostream &); }; -} // end namespace - /// InitialOverriddenDefinitionCollector - Finds the set of least derived bases /// that define the given method. struct InitialOverriddenDefinitionCollector { @@ -2641,6 +2647,8 @@ struct InitialOverriddenDefinitionCollector { } }; +} // end namespace + static bool BaseInSet(const CXXBaseSpecifier *Specifier, CXXBasePath &Path, void *BasesSet) { BasesSetVectorTy *Bases = (BasesSetVectorTy *)BasesSet; @@ -2730,8 +2738,9 @@ VFTableBuilder::ComputeThisOffset(FinalOverriders::OverriderInfo Overrider) { CharUnits ThisOffset = Overrider.Offset; CharUnits LastVBaseOffset; - // For each path from the overrider to the parents of the overridden methods, - // traverse the path, calculating the this offset in the most derived class. + // For each path from the overrider to the parents of the overridden + // methods, traverse the path, calculating the this offset in the most + // derived class. for (int J = 0, F = Path.size(); J != F; ++J) { const CXXBasePathElement &Element = Path[J]; QualType CurTy = Element.Base->getType(); @@ -2930,6 +2939,7 @@ static void GroupNewVirtualOverloads( typedef llvm::DenseMap<DeclarationName, unsigned> VisitedGroupIndicesTy; VisitedGroupIndicesTy VisitedGroupIndices; for (const auto *MD : RD->methods()) { + MD = MD->getCanonicalDecl(); VisitedGroupIndicesTy::iterator J; bool Inserted; std::tie(J, Inserted) = VisitedGroupIndices.insert( @@ -2962,7 +2972,8 @@ void VFTableBuilder::AddMethods(BaseSubobject Base, unsigned BaseDepth, const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); // See if this class expands a vftable of the base we look at, which is either - // the one defined by the vfptr base path or the primary base of the current class. + // the one defined by the vfptr base path or the primary base of the current + // class. const CXXRecordDecl *NextBase = nullptr, *NextLastVBase = LastVBase; CharUnits NextBaseOffset; if (BaseDepth < WhichVFPtr.PathToBaseWithVPtr.size()) { @@ -3020,7 +3031,8 @@ void VFTableBuilder::AddMethods(BaseSubobject Base, unsigned BaseDepth, ThisAdjustmentOffset); if (OverriddenMD) { - // If MD overrides anything in this vftable, we need to update the entries. + // If MD overrides anything in this vftable, we need to update the + // entries. 
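// [Editor's note -- annotation, not part of the patch. "Update the entries"
// means replacing the slot the overridden method occupied and, when the
// override lives at a different offset, routing the slot through a
// this-adjusting thunk. Minimal illustration (32-bit MS ABI offsets assumed):
//
//   struct A { virtual void f(); };
//   struct B { virtual void g(); };
//   struct C : A, B { void g() override; };   // B subobject at offset 4
//
// B's vftable within C keeps its one slot for g(), but the slot now points at
// a thunk that subtracts 4 from 'this' before reaching C::g(). End note.]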
MethodInfoMapTy::iterator OverriddenMDIterator = MethodInfoMap.find(OverriddenMD); @@ -3435,55 +3447,176 @@ MicrosoftVTableContext::~MicrosoftVTableContext() { llvm::DeleteContainerSeconds(VBaseInfo); } -static bool -findPathForVPtr(ASTContext &Context, const ASTRecordLayout &MostDerivedLayout, - const CXXRecordDecl *RD, CharUnits Offset, - llvm::SmallPtrSetImpl<const CXXRecordDecl *> &VBasesSeen, - VPtrInfo::BasePath &FullPath, VPtrInfo *Info) { - if (RD == Info->BaseWithVPtr && Offset == Info->FullOffsetInMDC) { - Info->PathToBaseWithVPtr = FullPath; - return true; +namespace { +typedef llvm::SetVector<BaseSubobject, std::vector<BaseSubobject>, + llvm::DenseSet<BaseSubobject>> FullPathTy; +} + +// This recursive function finds all paths from a subobject centered at +// (RD, Offset) to the subobject located at BaseWithVPtr. +static void findPathsToSubobject(ASTContext &Context, + const ASTRecordLayout &MostDerivedLayout, + const CXXRecordDecl *RD, CharUnits Offset, + BaseSubobject BaseWithVPtr, + FullPathTy &FullPath, + std::list<FullPathTy> &Paths) { + if (BaseSubobject(RD, Offset) == BaseWithVPtr) { + Paths.push_back(FullPath); + return; } const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); - // Recurse with non-virtual bases first. - // FIXME: Does this need to be in layout order? Virtual bases will be in base - // specifier order, which isn't necessarily layout order. - SmallVector<CXXBaseSpecifier, 4> Bases(RD->bases_begin(), RD->bases_end()); - std::stable_partition(Bases.begin(), Bases.end(), - [](CXXBaseSpecifier bs) { return !bs.isVirtual(); }); - - for (const auto &B : Bases) { - const CXXRecordDecl *Base = B.getType()->getAsCXXRecordDecl(); - CharUnits NewOffset; - if (!B.isVirtual()) - NewOffset = Offset + Layout.getBaseClassOffset(Base); - else { - if (!VBasesSeen.insert(Base).second) - return false; - NewOffset = MostDerivedLayout.getVBaseClassOffset(Base); - } - FullPath.push_back(Base); - if (findPathForVPtr(Context, MostDerivedLayout, Base, NewOffset, VBasesSeen, - FullPath, Info)) - return true; + for (const CXXBaseSpecifier &BS : RD->bases()) { + const CXXRecordDecl *Base = BS.getType()->getAsCXXRecordDecl(); + CharUnits NewOffset = BS.isVirtual() + ? MostDerivedLayout.getVBaseClassOffset(Base) + : Offset + Layout.getBaseClassOffset(Base); + FullPath.insert(BaseSubobject(Base, NewOffset)); + findPathsToSubobject(Context, MostDerivedLayout, Base, NewOffset, + BaseWithVPtr, FullPath, Paths); FullPath.pop_back(); } - return false; +} + +// Return the paths which are not subsets of other paths. +static void removeRedundantPaths(std::list<FullPathTy> &FullPaths) { + FullPaths.remove_if([&](const FullPathTy &SpecificPath) { + for (const FullPathTy &OtherPath : FullPaths) { + if (&SpecificPath == &OtherPath) + continue; + if (std::all_of(SpecificPath.begin(), SpecificPath.end(), + [&](const BaseSubobject &BSO) { + return OtherPath.count(BSO) != 0; + })) { + return true; + } + } + return false; + }); +} + +static CharUnits getOffsetOfFullPath(ASTContext &Context, + const CXXRecordDecl *RD, + const FullPathTy &FullPath) { + const ASTRecordLayout &MostDerivedLayout = + Context.getASTRecordLayout(RD); + CharUnits Offset = CharUnits::fromQuantity(-1); + for (const BaseSubobject &BSO : FullPath) { + const CXXRecordDecl *Base = BSO.getBase(); + // The first entry in the path is always the most derived record, skip it. 
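// [Editor's note -- annotation, not part of the patch. Worked example for
// getOffsetOfFullPath(), hypothetical 32-bit layout:
//
//   struct A { virtual void f(); };
//   struct B { virtual void f(); };
//   struct D : A, B {};   // A at offset 0, B at offset 4
//
// For the full path {D, B}: the first entry D only resets Offset to zero (the
// check just below), then B is found among D's bases; it is non-virtual, so
// Offset becomes 0 + 4 = 4, the displacement of B's vfptr inside D. Had B
// been virtual, the offset would come from the most-derived layout's vbase
// table instead. End note.]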
+ if (Base == RD) { + assert(Offset.getQuantity() == -1); + Offset = CharUnits::Zero(); + continue; + } + assert(Offset.getQuantity() != -1); + const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); + // While we know which base has to be traversed, we don't know if that base + // was a virtual base. + const CXXBaseSpecifier *BaseBS = std::find_if( + RD->bases_begin(), RD->bases_end(), [&](const CXXBaseSpecifier &BS) { + return BS.getType()->getAsCXXRecordDecl() == Base; + }); + Offset = BaseBS->isVirtual() ? MostDerivedLayout.getVBaseClassOffset(Base) + : Offset + Layout.getBaseClassOffset(Base); + RD = Base; + } + return Offset; +} + +// We want to select the path which introduces the most covariant overrides. If +// two paths introduce overrides which the other path doesn't contain, issue a +// diagnostic. +static const FullPathTy *selectBestPath(ASTContext &Context, + const CXXRecordDecl *RD, VPtrInfo *Info, + std::list<FullPathTy> &FullPaths) { + // Handle some easy cases first. + if (FullPaths.empty()) + return nullptr; + if (FullPaths.size() == 1) + return &FullPaths.front(); + + const FullPathTy *BestPath = nullptr; + typedef std::set<const CXXMethodDecl *> OverriderSetTy; + OverriderSetTy LastOverrides; + for (const FullPathTy &SpecificPath : FullPaths) { + assert(!SpecificPath.empty()); + OverriderSetTy CurrentOverrides; + const CXXRecordDecl *TopLevelRD = SpecificPath.begin()->getBase(); + // Find the distance from the start of the path to the subobject with the + // VPtr. + CharUnits BaseOffset = + getOffsetOfFullPath(Context, TopLevelRD, SpecificPath); + FinalOverriders Overriders(TopLevelRD, CharUnits::Zero(), TopLevelRD); + for (const CXXMethodDecl *MD : Info->BaseWithVPtr->methods()) { + if (!MD->isVirtual()) + continue; + FinalOverriders::OverriderInfo OI = + Overriders.getOverrider(MD->getCanonicalDecl(), BaseOffset); + const CXXMethodDecl *OverridingMethod = OI.Method; + // Only overriders which have a return adjustment introduce problematic + // thunks. + if (ComputeReturnAdjustmentBaseOffset(Context, OverridingMethod, MD) + .isEmpty()) + continue; + // It's possible that the overrider isn't in this path. If so, skip it + // because this path didn't introduce it. + const CXXRecordDecl *OverridingParent = OverridingMethod->getParent(); + if (std::none_of(SpecificPath.begin(), SpecificPath.end(), + [&](const BaseSubobject &BSO) { + return BSO.getBase() == OverridingParent; + })) + continue; + CurrentOverrides.insert(OverridingMethod); + } + OverriderSetTy NewOverrides = + llvm::set_difference(CurrentOverrides, LastOverrides); + if (NewOverrides.empty()) + continue; + OverriderSetTy MissingOverrides = + llvm::set_difference(LastOverrides, CurrentOverrides); + if (MissingOverrides.empty()) { + // This path is a strict improvement over the last path, let's use it. + BestPath = &SpecificPath; + std::swap(CurrentOverrides, LastOverrides); + } else { + // This path introduces an overrider with a conflicting covariant thunk. + DiagnosticsEngine &Diags = Context.getDiagnostics(); + const CXXMethodDecl *CovariantMD = *NewOverrides.begin(); + const CXXMethodDecl *ConflictMD = *MissingOverrides.begin(); + Diags.Report(RD->getLocation(), diag::err_vftable_ambiguous_component) + << RD; + Diags.Report(CovariantMD->getLocation(), diag::note_covariant_thunk) + << CovariantMD; + Diags.Report(ConflictMD->getLocation(), diag::note_covariant_thunk) + << ConflictMD; + } + } + // Go with the path that introduced the most covariant overrides. 
If there is + // no such path, pick the first path. + return BestPath ? BestPath : &FullPaths.front(); } static void computeFullPathsForVFTables(ASTContext &Context, const CXXRecordDecl *RD, VPtrInfoVector &Paths) { - llvm::SmallPtrSet<const CXXRecordDecl*, 4> VBasesSeen; const ASTRecordLayout &MostDerivedLayout = Context.getASTRecordLayout(RD); - VPtrInfo::BasePath FullPath; + FullPathTy FullPath; + std::list<FullPathTy> FullPaths; for (VPtrInfo *Info : Paths) { - findPathForVPtr(Context, MostDerivedLayout, RD, CharUnits::Zero(), - VBasesSeen, FullPath, Info); - VBasesSeen.clear(); + findPathsToSubobject( + Context, MostDerivedLayout, RD, CharUnits::Zero(), + BaseSubobject(Info->BaseWithVPtr, Info->FullOffsetInMDC), FullPath, + FullPaths); FullPath.clear(); + removeRedundantPaths(FullPaths); + Info->PathToBaseWithVPtr.clear(); + if (const FullPathTy *BestPath = + selectBestPath(Context, RD, Info, FullPaths)) + for (const BaseSubobject &BSO : *BestPath) + Info->PathToBaseWithVPtr.push_back(BSO.getBase()); + FullPaths.clear(); } } diff --git a/contrib/llvm/tools/clang/lib/ASTMatchers/ASTMatchFinder.cpp b/contrib/llvm/tools/clang/lib/ASTMatchers/ASTMatchFinder.cpp index fa7968a..c5f3063 100644 --- a/contrib/llvm/tools/clang/lib/ASTMatchers/ASTMatchFinder.cpp +++ b/contrib/llvm/tools/clang/lib/ASTMatchers/ASTMatchFinder.cpp @@ -300,7 +300,7 @@ public: const MatchFinder::MatchFinderOptions &Options) : Matchers(Matchers), Options(Options), ActiveASTContext(nullptr) {} - ~MatchASTVisitor() { + ~MatchASTVisitor() override { if (Options.CheckProfiling) { Options.CheckProfiling->Records = std::move(TimeByBucket); } diff --git a/contrib/llvm/tools/clang/lib/ASTMatchers/Dynamic/Diagnostics.cpp b/contrib/llvm/tools/clang/lib/ASTMatchers/Dynamic/Diagnostics.cpp index b0abdc7..f6d3449 100644 --- a/contrib/llvm/tools/clang/lib/ASTMatchers/Dynamic/Diagnostics.cpp +++ b/contrib/llvm/tools/clang/lib/ASTMatchers/Dynamic/Diagnostics.cpp @@ -12,7 +12,6 @@ namespace clang { namespace ast_matchers { namespace dynamic { - Diagnostics::ArgStream Diagnostics::pushContextFrame(ContextType Type, SourceRange Range) { ContextStack.push_back(ContextFrame()); @@ -75,7 +74,7 @@ Diagnostics::ArgStream Diagnostics::addError(const SourceRange &Range, return ArgStream(&Last.Messages.back().Args); } -StringRef contextTypeToFormatString(Diagnostics::ContextType Type) { +static StringRef contextTypeToFormatString(Diagnostics::ContextType Type) { switch (Type) { case Diagnostics::CT_MatcherConstruct: return "Error building matcher $0."; @@ -85,7 +84,7 @@ StringRef contextTypeToFormatString(Diagnostics::ContextType Type) { llvm_unreachable("Unknown ContextType value."); } -StringRef errorTypeToFormatString(Diagnostics::ErrorType Type) { +static StringRef errorTypeToFormatString(Diagnostics::ErrorType Type) { switch (Type) { case Diagnostics::ET_RegistryMatcherNotFound: return "Matcher not found: $0"; @@ -130,8 +129,9 @@ StringRef errorTypeToFormatString(Diagnostics::ErrorType Type) { llvm_unreachable("Unknown ErrorType value."); } -void formatErrorString(StringRef FormatString, ArrayRef<std::string> Args, - llvm::raw_ostream &OS) { +static void formatErrorString(StringRef FormatString, + ArrayRef<std::string> Args, + llvm::raw_ostream &OS) { while (!FormatString.empty()) { std::pair<StringRef, StringRef> Pieces = FormatString.split("$"); OS << Pieces.first.str(); diff --git a/contrib/llvm/tools/clang/lib/ASTMatchers/Dynamic/Marshallers.h b/contrib/llvm/tools/clang/lib/ASTMatchers/Dynamic/Marshallers.h index b78bc03..36a6415 100644 
--- a/contrib/llvm/tools/clang/lib/ASTMatchers/Dynamic/Marshallers.h +++ b/contrib/llvm/tools/clang/lib/ASTMatchers/Dynamic/Marshallers.h @@ -181,18 +181,20 @@ public: ArgKinds(ArgKinds.begin(), ArgKinds.end()) {} VariantMatcher create(const SourceRange &NameRange, - ArrayRef<ParserValue> Args, Diagnostics *Error) const { + ArrayRef<ParserValue> Args, + Diagnostics *Error) const override { return Marshaller(Func, MatcherName, NameRange, Args, Error); } - bool isVariadic() const { return false; } - unsigned getNumArgs() const { return ArgKinds.size(); } + bool isVariadic() const override { return false; } + unsigned getNumArgs() const override { return ArgKinds.size(); } void getArgKinds(ast_type_traits::ASTNodeKind ThisKind, unsigned ArgNo, - std::vector<ArgKind> &Kinds) const { + std::vector<ArgKind> &Kinds) const override { Kinds.push_back(ArgKinds[ArgNo]); } - bool isConvertibleTo(ast_type_traits::ASTNodeKind Kind, unsigned *Specificity, - ast_type_traits::ASTNodeKind *LeastDerivedKind) const { + bool isConvertibleTo( + ast_type_traits::ASTNodeKind Kind, unsigned *Specificity, + ast_type_traits::ASTNodeKind *LeastDerivedKind) const override { return isRetKindConvertibleTo(RetKinds, Kind, Specificity, LeastDerivedKind); } @@ -333,18 +335,20 @@ public: } VariantMatcher create(const SourceRange &NameRange, - ArrayRef<ParserValue> Args, Diagnostics *Error) const { + ArrayRef<ParserValue> Args, + Diagnostics *Error) const override { return Func(MatcherName, NameRange, Args, Error); } - bool isVariadic() const { return true; } - unsigned getNumArgs() const { return 0; } + bool isVariadic() const override { return true; } + unsigned getNumArgs() const override { return 0; } void getArgKinds(ast_type_traits::ASTNodeKind ThisKind, unsigned ArgNo, - std::vector<ArgKind> &Kinds) const { + std::vector<ArgKind> &Kinds) const override { Kinds.push_back(ArgsKind); } - bool isConvertibleTo(ast_type_traits::ASTNodeKind Kind, unsigned *Specificity, - ast_type_traits::ASTNodeKind *LeastDerivedKind) const { + bool isConvertibleTo( + ast_type_traits::ASTNodeKind Kind, unsigned *Specificity, + ast_type_traits::ASTNodeKind *LeastDerivedKind) const override { return isRetKindConvertibleTo(RetKinds, Kind, Specificity, LeastDerivedKind); } @@ -487,11 +491,11 @@ public: OverloadedMatcherDescriptor(ArrayRef<MatcherDescriptor *> Callbacks) : Overloads(Callbacks.begin(), Callbacks.end()) {} - virtual ~OverloadedMatcherDescriptor() {} + ~OverloadedMatcherDescriptor() override {} - virtual VariantMatcher create(const SourceRange &NameRange, - ArrayRef<ParserValue> Args, - Diagnostics *Error) const { + VariantMatcher create(const SourceRange &NameRange, + ArrayRef<ParserValue> Args, + Diagnostics *Error) const override { std::vector<VariantMatcher> Constructed; Diagnostics::OverloadContext Ctx(Error); for (const auto &O : Overloads) { @@ -512,7 +516,7 @@ public: return Constructed[0]; } - bool isVariadic() const { + bool isVariadic() const override { bool Overload0Variadic = Overloads[0]->isVariadic(); #ifndef NDEBUG for (const auto &O : Overloads) { @@ -522,7 +526,7 @@ public: return Overload0Variadic; } - unsigned getNumArgs() const { + unsigned getNumArgs() const override { unsigned Overload0NumArgs = Overloads[0]->getNumArgs(); #ifndef NDEBUG for (const auto &O : Overloads) { @@ -533,15 +537,16 @@ public: } void getArgKinds(ast_type_traits::ASTNodeKind ThisKind, unsigned ArgNo, - std::vector<ArgKind> &Kinds) const { + std::vector<ArgKind> &Kinds) const override { for (const auto &O : Overloads) { if 
(O->isConvertibleTo(ThisKind)) O->getArgKinds(ThisKind, ArgNo, Kinds); } } - bool isConvertibleTo(ast_type_traits::ASTNodeKind Kind, unsigned *Specificity, - ast_type_traits::ASTNodeKind *LeastDerivedKind) const { + bool isConvertibleTo( + ast_type_traits::ASTNodeKind Kind, unsigned *Specificity, + ast_type_traits::ASTNodeKind *LeastDerivedKind) const override { for (const auto &O : Overloads) { if (O->isConvertibleTo(Kind, Specificity, LeastDerivedKind)) return true; @@ -562,9 +567,9 @@ public: : MinCount(MinCount), MaxCount(MaxCount), Op(Op), MatcherName(MatcherName) {} - virtual VariantMatcher create(const SourceRange &NameRange, - ArrayRef<ParserValue> Args, - Diagnostics *Error) const override { + VariantMatcher create(const SourceRange &NameRange, + ArrayRef<ParserValue> Args, + Diagnostics *Error) const override { if (Args.size() < MinCount || MaxCount < Args.size()) { const std::string MaxStr = (MaxCount == UINT_MAX ? "" : Twine(MaxCount)).str(); diff --git a/contrib/llvm/tools/clang/lib/ASTMatchers/Dynamic/Registry.cpp b/contrib/llvm/tools/clang/lib/ASTMatchers/Dynamic/Registry.cpp index d550a89..04d3a32 100644 --- a/contrib/llvm/tools/clang/lib/ASTMatchers/Dynamic/Registry.cpp +++ b/contrib/llvm/tools/clang/lib/ASTMatchers/Dynamic/Registry.cpp @@ -128,6 +128,7 @@ RegistryMaps::RegistryMaps() { REGISTER_MATCHER(constructorDecl); REGISTER_MATCHER(containsDeclaration); REGISTER_MATCHER(continueStmt); + REGISTER_MATCHER(conversionDecl); REGISTER_MATCHER(cStyleCastExpr); REGISTER_MATCHER(ctorInitializer); REGISTER_MATCHER(CUDAKernelCallExpr); @@ -198,6 +199,7 @@ RegistryMaps::RegistryMaps() { REGISTER_MATCHER(hasIncrement); REGISTER_MATCHER(hasIndex); REGISTER_MATCHER(hasInitializer); + REGISTER_MATCHER(hasKeywordSelector); REGISTER_MATCHER(hasLHS); REGISTER_MATCHER(hasLocalQualifiers); REGISTER_MATCHER(hasLocalStorage); @@ -205,6 +207,7 @@ RegistryMaps::RegistryMaps() { REGISTER_MATCHER(hasLoopVariable); REGISTER_MATCHER(hasMethod); REGISTER_MATCHER(hasName); + REGISTER_MATCHER(hasNullSelector); REGISTER_MATCHER(hasObjectExpression); REGISTER_MATCHER(hasOperatorName); REGISTER_MATCHER(hasOverloadedOperatorName); @@ -212,7 +215,9 @@ RegistryMaps::RegistryMaps() { REGISTER_MATCHER(hasParent); REGISTER_MATCHER(hasQualifier); REGISTER_MATCHER(hasRangeInit); + REGISTER_MATCHER(hasReceiverType); REGISTER_MATCHER(hasRHS); + REGISTER_MATCHER(hasSelector); REGISTER_MATCHER(hasSingleDecl); REGISTER_MATCHER(hasSize); REGISTER_MATCHER(hasSizeExpr); @@ -223,6 +228,7 @@ RegistryMaps::RegistryMaps() { REGISTER_MATCHER(hasTrueExpression); REGISTER_MATCHER(hasTypeLoc); REGISTER_MATCHER(hasUnaryOperand); + REGISTER_MATCHER(hasUnarySelector); REGISTER_MATCHER(hasValueType); REGISTER_MATCHER(ifStmt); REGISTER_MATCHER(ignoringImpCasts); @@ -262,6 +268,7 @@ RegistryMaps::RegistryMaps() { REGISTER_MATCHER(lambdaExpr); REGISTER_MATCHER(lValueReferenceType); REGISTER_MATCHER(matchesName); + REGISTER_MATCHER(matchesSelector); REGISTER_MATCHER(materializeTemporaryExpr); REGISTER_MATCHER(member); REGISTER_MATCHER(memberCallExpr); @@ -276,7 +283,9 @@ RegistryMaps::RegistryMaps() { REGISTER_MATCHER(newExpr); REGISTER_MATCHER(nullPtrLiteralExpr); REGISTER_MATCHER(nullStmt); + REGISTER_MATCHER(numSelectorArgs); REGISTER_MATCHER(ofClass); + REGISTER_MATCHER(objcMessageExpr); REGISTER_MATCHER(on); REGISTER_MATCHER(onImplicitObjectArgument); REGISTER_MATCHER(operatorCallExpr); @@ -315,6 +324,7 @@ RegistryMaps::RegistryMaps() { REGISTER_MATCHER(throughUsingDecl); REGISTER_MATCHER(throwExpr); REGISTER_MATCHER(to); + 
REGISTER_MATCHER(translationUnitDecl); REGISTER_MATCHER(tryStmt); REGISTER_MATCHER(type); REGISTER_MATCHER(typedefDecl); diff --git a/contrib/llvm/tools/clang/lib/Analysis/AnalysisDeclContext.cpp b/contrib/llvm/tools/clang/lib/Analysis/AnalysisDeclContext.cpp index be66f32..4e623c8 100644 --- a/contrib/llvm/tools/clang/lib/Analysis/AnalysisDeclContext.cpp +++ b/contrib/llvm/tools/clang/lib/Analysis/AnalysisDeclContext.cpp @@ -530,14 +530,14 @@ static DeclVec* LazyInitializeReferencedDecls(const BlockDecl *BD, return BV; } -std::pair<AnalysisDeclContext::referenced_decls_iterator, - AnalysisDeclContext::referenced_decls_iterator> +llvm::iterator_range<AnalysisDeclContext::referenced_decls_iterator> AnalysisDeclContext::getReferencedBlockVars(const BlockDecl *BD) { if (!ReferencedBlockVars) ReferencedBlockVars = new llvm::DenseMap<const BlockDecl*,void*>(); - DeclVec *V = LazyInitializeReferencedDecls(BD, (*ReferencedBlockVars)[BD], A); - return std::make_pair(V->begin(), V->end()); + const DeclVec *V = + LazyInitializeReferencedDecls(BD, (*ReferencedBlockVars)[BD], A); + return llvm::make_range(V->begin(), V->end()); } ManagedAnalysis *&AnalysisDeclContext::getAnalysisImpl(const void *tag) { diff --git a/contrib/llvm/tools/clang/lib/Analysis/CFG.cpp b/contrib/llvm/tools/clang/lib/Analysis/CFG.cpp index d9073aa..2744c5f 100644 --- a/contrib/llvm/tools/clang/lib/Analysis/CFG.cpp +++ b/contrib/llvm/tools/clang/lib/Analysis/CFG.cpp @@ -156,7 +156,7 @@ public: return !(*this == rhs); } - LLVM_EXPLICIT operator bool() const { + explicit operator bool() const { return *this != const_iterator(); } @@ -203,9 +203,9 @@ int LocalScope::const_iterator::distance(LocalScope::const_iterator L) { return D; } -/// BlockScopePosPair - Structure for specifying position in CFG during its -/// build process. It consists of CFGBlock that specifies position in CFG graph -/// and LocalScope::const_iterator that specifies position in LocalScope graph. +/// Structure for specifying position in CFG during its build process. It +/// consists of CFGBlock that specifies position in CFG and +/// LocalScope::const_iterator that specifies position in LocalScope graph. struct BlockScopePosPair { BlockScopePosPair() : block(nullptr) {} BlockScopePosPair(CFGBlock *b, LocalScope::const_iterator scopePos) @@ -841,12 +841,12 @@ private: // must be false. 
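// ---------------------------------------------------------------------------
// Sketch of the getReferencedBlockVars API change above: returning a range
// object instead of a std::pair of iterators lets callers use range-for, as
// the LiveVariables.cpp hunk later in this diff does. SimpleRange is a
// hand-rolled stand-in for llvm::iterator_range/llvm::make_range.
template <typename It> struct SimpleRange {
  It BeginIt, EndIt;
  It begin() const { return BeginIt; }
  It end() const { return EndIt; }
};

template <typename It> SimpleRange<It> makeRange(It B, It E) {
  return {B, E};
}

// Before: std::pair<iterator, iterator> forced std::tie plus a manual loop.
// After:  for (const VarDecl *VD : ctx.getReferencedBlockVars(BD)) { ... }
// ---------------------------------------------------------------------------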
llvm::APSInt IntVal; if (Bop->getLHS()->EvaluateAsInt(IntVal, *Context)) { - if (IntVal.getBoolValue() == false) { + if (!IntVal.getBoolValue()) { return TryResult(false); } } if (Bop->getRHS()->EvaluateAsInt(IntVal, *Context)) { - if (IntVal.getBoolValue() == false) { + if (!IntVal.getBoolValue()) { return TryResult(false); } } @@ -3950,9 +3950,8 @@ public: } } } - - virtual ~StmtPrinterHelper() {} + ~StmtPrinterHelper() override {} const LangOptions &getLangOpts() const { return LangOpts; } void setBlockID(signed i) { currentBlock = i; } diff --git a/contrib/llvm/tools/clang/lib/Analysis/Consumed.cpp b/contrib/llvm/tools/clang/lib/Analysis/Consumed.cpp index 2b2da2c..fa985ee 100644 --- a/contrib/llvm/tools/clang/lib/Analysis/Consumed.cpp +++ b/contrib/llvm/tools/clang/lib/Analysis/Consumed.cpp @@ -946,10 +946,9 @@ void ConsumedStmtVisitor::VisitVarDecl(const VarDecl *Var) { namespace clang { namespace consumed { -void splitVarStateForIf(const IfStmt * IfNode, const VarTestResult &Test, - ConsumedStateMap *ThenStates, - ConsumedStateMap *ElseStates) { - +static void splitVarStateForIf(const IfStmt *IfNode, const VarTestResult &Test, + ConsumedStateMap *ThenStates, + ConsumedStateMap *ElseStates) { ConsumedState VarState = ThenStates->getState(Test.Var); if (VarState == CS_Unknown) { @@ -964,9 +963,9 @@ void splitVarStateForIf(const IfStmt * IfNode, const VarTestResult &Test, } } -void splitVarStateForIfBinOp(const PropagationInfo &PInfo, - ConsumedStateMap *ThenStates, ConsumedStateMap *ElseStates) { - +static void splitVarStateForIfBinOp(const PropagationInfo &PInfo, + ConsumedStateMap *ThenStates, + ConsumedStateMap *ElseStates) { const VarTestResult <est = PInfo.getLTest(), &RTest = PInfo.getRTest(); @@ -1443,7 +1442,7 @@ void ConsumedAnalyzer::run(AnalysisDeclContext &AC) { CurrStates, WarningsHandler); - if (BlockInfo.allBackEdgesVisited(*SI, CurrBlock)) + if (BlockInfo.allBackEdgesVisited(CurrBlock, *SI)) BlockInfo.discardInfo(*SI); } else { BlockInfo.addInfo(*SI, CurrStates, OwnershipTaken); diff --git a/contrib/llvm/tools/clang/lib/Analysis/FormatString.cpp b/contrib/llvm/tools/clang/lib/Analysis/FormatString.cpp index 662166c..0948bc0 100644 --- a/contrib/llvm/tools/clang/lib/Analysis/FormatString.cpp +++ b/contrib/llvm/tools/clang/lib/Analysis/FormatString.cpp @@ -256,16 +256,17 @@ clang::analyze_format_string::ParseLengthModifier(FormatSpecifier &FS, // Methods on ArgType. //===----------------------------------------------------------------------===// -bool ArgType::matchesType(ASTContext &C, QualType argTy) const { +clang::analyze_format_string::ArgType::MatchKind +ArgType::matchesType(ASTContext &C, QualType argTy) const { if (Ptr) { // It has to be a pointer. const PointerType *PT = argTy->getAs<PointerType>(); if (!PT) - return false; + return NoMatch; // We cannot write through a const qualified pointer. 
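// ---------------------------------------------------------------------------
// Sketch of the tri-state result this hunk threads through the format-string
// checker: instead of bool, matchesType-style checks can now distinguish a
// hard mismatch from one that only matters under -pedantic (see the
// CPointerTy case further down, which returns NoMatchPedantic for non-void
// pointers passed to %p). Names here are illustrative, not the patch's.
enum class DemoMatchKind { NoMatch, NoMatchPedantic, Match };

static DemoMatchKind checkVoidPointerArg(bool IsVoidPtr, bool IsOtherPtr) {
  if (IsVoidPtr)
    return DemoMatchKind::Match;           // %p with void* always matches
  if (IsOtherPtr)
    return DemoMatchKind::NoMatchPedantic; // diagnose only under -pedantic
  return DemoMatchKind::NoMatch;           // non-pointer: hard mismatch
}
// ---------------------------------------------------------------------------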
if (PT->getPointeeType().isConstQualified()) - return false; + return NoMatch; argTy = PT->getPointeeType(); } @@ -275,8 +276,8 @@ bool ArgType::matchesType(ASTContext &C, QualType argTy) const { llvm_unreachable("ArgType must be valid"); case UnknownTy: - return true; - + return Match; + case AnyCharTy: { if (const EnumType *ETy = argTy->getAs<EnumType>()) argTy = ETy->getDecl()->getIntegerType(); @@ -289,18 +290,18 @@ bool ArgType::matchesType(ASTContext &C, QualType argTy) const { case BuiltinType::SChar: case BuiltinType::UChar: case BuiltinType::Char_U: - return true; + return Match; } - return false; + return NoMatch; } - + case SpecificTy: { if (const EnumType *ETy = argTy->getAs<EnumType>()) argTy = ETy->getDecl()->getIntegerType(); argTy = C.getCanonicalType(argTy).getUnqualifiedType(); if (T == argTy) - return true; + return Match; // Check for "compatible types". if (const BuiltinType *BT = argTy->getAs<BuiltinType>()) switch (BT->getKind()) { @@ -309,32 +310,33 @@ bool ArgType::matchesType(ASTContext &C, QualType argTy) const { case BuiltinType::Char_S: case BuiltinType::SChar: case BuiltinType::Char_U: - case BuiltinType::UChar: - return T == C.UnsignedCharTy || T == C.SignedCharTy; + case BuiltinType::UChar: + return T == C.UnsignedCharTy || T == C.SignedCharTy ? Match + : NoMatch; case BuiltinType::Short: - return T == C.UnsignedShortTy; + return T == C.UnsignedShortTy ? Match : NoMatch; case BuiltinType::UShort: - return T == C.ShortTy; + return T == C.ShortTy ? Match : NoMatch; case BuiltinType::Int: - return T == C.UnsignedIntTy; + return T == C.UnsignedIntTy ? Match : NoMatch; case BuiltinType::UInt: - return T == C.IntTy; + return T == C.IntTy ? Match : NoMatch; case BuiltinType::Long: - return T == C.UnsignedLongTy; + return T == C.UnsignedLongTy ? Match : NoMatch; case BuiltinType::ULong: - return T == C.LongTy; + return T == C.LongTy ? Match : NoMatch; case BuiltinType::LongLong: - return T == C.UnsignedLongLongTy; + return T == C.UnsignedLongLongTy ? Match : NoMatch; case BuiltinType::ULongLong: - return T == C.LongLongTy; + return T == C.LongLongTy ? Match : NoMatch; } - return false; + return NoMatch; } case CStrTy: { const PointerType *PT = argTy->getAs<PointerType>(); if (!PT) - return false; + return NoMatch; QualType pointeeTy = PT->getPointeeType(); if (const BuiltinType *BT = pointeeTy->getAs<BuiltinType>()) switch (BT->getKind()) { @@ -343,50 +345,56 @@ bool ArgType::matchesType(ASTContext &C, QualType argTy) const { case BuiltinType::UChar: case BuiltinType::Char_S: case BuiltinType::SChar: - return true; + return Match; default: break; } - return false; + return NoMatch; } case WCStrTy: { const PointerType *PT = argTy->getAs<PointerType>(); if (!PT) - return false; + return NoMatch; QualType pointeeTy = C.getCanonicalType(PT->getPointeeType()).getUnqualifiedType(); - return pointeeTy == C.getWideCharType(); + return pointeeTy == C.getWideCharType() ? Match : NoMatch; } - + case WIntTy: { - + QualType PromoArg = argTy->isPromotableIntegerType() ? C.getPromotedIntegerType(argTy) : argTy; - + QualType WInt = C.getCanonicalType(C.getWIntType()).getUnqualifiedType(); PromoArg = C.getCanonicalType(PromoArg).getUnqualifiedType(); - + // If the promoted argument is the corresponding signed type of the // wint_t type, then it should match. if (PromoArg->hasSignedIntegerRepresentation() && C.getCorrespondingUnsignedType(PromoArg) == WInt) - return true; + return Match; - return WInt == PromoArg; + return WInt == PromoArg ? 
Match : NoMatch; } case CPointerTy: - return argTy->isPointerType() || argTy->isObjCObjectPointerType() || - argTy->isBlockPointerType() || argTy->isNullPtrType(); + if (argTy->isVoidPointerType()) { + return Match; + } if (argTy->isPointerType() || argTy->isObjCObjectPointerType() || + argTy->isBlockPointerType() || argTy->isNullPtrType()) { + return NoMatchPedantic; + } else { + return NoMatch; + } case ObjCPointerTy: { if (argTy->getAs<ObjCObjectPointerType>() || argTy->getAs<BlockPointerType>()) - return true; - + return Match; + // Handle implicit toll-free bridging. if (const PointerType *PT = argTy->getAs<PointerType>()) { // Things such as CFTypeRef are really just opaque pointers @@ -395,9 +403,9 @@ bool ArgType::matchesType(ASTContext &C, QualType argTy) const { // structs can be toll-free bridged, we just accept them all. QualType pointee = PT->getPointeeType(); if (pointee->getAsStructureType() || pointee->isVoidType()) - return true; + return Match; } - return false; + return NoMatch; } } @@ -791,7 +799,8 @@ bool FormatSpecifier::hasStandardLengthModifier() const { llvm_unreachable("Invalid LengthModifier Kind!"); } -bool FormatSpecifier::hasStandardConversionSpecifier(const LangOptions &LangOpt) const { +bool FormatSpecifier::hasStandardConversionSpecifier( + const LangOptions &LangOpt) const { switch (CS.getKind()) { case ConversionSpecifier::cArg: case ConversionSpecifier::dArg: diff --git a/contrib/llvm/tools/clang/lib/Analysis/LiveVariables.cpp b/contrib/llvm/tools/clang/lib/Analysis/LiveVariables.cpp index 86b679c..0ab1580 100644 --- a/contrib/llvm/tools/clang/lib/Analysis/LiveVariables.cpp +++ b/contrib/llvm/tools/clang/lib/Analysis/LiveVariables.cpp @@ -356,11 +356,8 @@ void TransferFunctions::VisitBinaryOperator(BinaryOperator *B) { } void TransferFunctions::VisitBlockExpr(BlockExpr *BE) { - AnalysisDeclContext::referenced_decls_iterator I, E; - std::tie(I, E) = - LV.analysisContext.getReferencedBlockVars(BE->getBlockDecl()); - for ( ; I != E ; ++I) { - const VarDecl *VD = *I; + for (const VarDecl *VD : + LV.analysisContext.getReferencedBlockVars(BE->getBlockDecl())) { if (isAlwaysAlive(VD)) continue; val.liveDecls = LV.DSetFact.add(val.liveDecls, VD); diff --git a/contrib/llvm/tools/clang/lib/Analysis/ThreadSafety.cpp b/contrib/llvm/tools/clang/lib/Analysis/ThreadSafety.cpp index f45d6e7..e2c6ab5 100644 --- a/contrib/llvm/tools/clang/lib/Analysis/ThreadSafety.cpp +++ b/contrib/llvm/tools/clang/lib/Analysis/ThreadSafety.cpp @@ -44,14 +44,13 @@ #include <sstream> #include <utility> #include <vector> - - -namespace clang { -namespace threadSafety { +using namespace clang; +using namespace threadSafety; // Key method definition ThreadSafetyHandler::~ThreadSafetyHandler() {} +namespace { class TILPrinter : public til::PrettyPrinter<TILPrinter, llvm::raw_ostream> {}; @@ -69,7 +68,6 @@ static void warnInvalidLock(ThreadSafetyHandler &Handler, Handler.handleInvalidLockExp(Kind, Loc); } - /// \brief A set of CapabilityInfo objects, which are compiled from the /// requires attributes on a function. class CapExprSet : public SmallVector<CapabilityExpr, 4> { @@ -101,17 +99,22 @@ private: LockKind LKind; ///< exclusive or shared SourceLocation AcquireLoc; ///< where it was acquired. 
bool Asserted; ///< true if the lock was asserted + bool Declared; ///< true if the lock was declared public: FactEntry(const CapabilityExpr &CE, LockKind LK, SourceLocation Loc, - bool Asrt) - : CapabilityExpr(CE), LKind(LK), AcquireLoc(Loc), Asserted(Asrt) {} + bool Asrt, bool Declrd = false) + : CapabilityExpr(CE), LKind(LK), AcquireLoc(Loc), Asserted(Asrt), + Declared(Declrd) {} virtual ~FactEntry() {} - LockKind kind() const { return LKind; } + LockKind kind() const { return LKind; } SourceLocation loc() const { return AcquireLoc; } bool asserted() const { return Asserted; } + bool declared() const { return Declared; } + + void setDeclared(bool D) { Declared = D; } virtual void handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan, @@ -231,14 +234,61 @@ public: FactEntry *findPartialMatch(FactManager &FM, const CapabilityExpr &CapE) const { - auto I = std::find_if(begin(), end(), [&](FactID ID) { + auto I = std::find_if(begin(), end(), [&](FactID ID) -> bool { return FM[ID].partiallyMatches(CapE); }); return I != end() ? &FM[*I] : nullptr; } + + bool containsMutexDecl(FactManager &FM, const ValueDecl* Vd) const { + auto I = std::find_if(begin(), end(), [&](FactID ID) -> bool { + return FM[ID].valueDecl() == Vd; + }); + return I != end(); + } }; +class ThreadSafetyAnalyzer; +} // namespace + +namespace clang { +namespace threadSafety { +class BeforeSet { +private: + typedef SmallVector<const ValueDecl*, 4> BeforeVect; + + struct BeforeInfo { + BeforeInfo() : Vect(nullptr), Visited(false) { } + BeforeInfo(BeforeInfo &&O) + : Vect(std::move(O.Vect)), Visited(O.Visited) + {} + + std::unique_ptr<BeforeVect> Vect; + int Visited; + }; + + typedef llvm::DenseMap<const ValueDecl*, BeforeInfo> BeforeMap; + typedef llvm::DenseMap<const ValueDecl*, bool> CycleMap; + +public: + BeforeSet() { } + + BeforeInfo* insertAttrExprs(const ValueDecl* Vd, + ThreadSafetyAnalyzer& Analyzer); + + void checkBeforeAfter(const ValueDecl* Vd, + const FactSet& FSet, + ThreadSafetyAnalyzer& Analyzer, + SourceLocation Loc, StringRef CapKind); + +private: + BeforeMap BMap; + CycleMap CycMap; +}; +} // end namespace threadSafety +} // end namespace clang +namespace { typedef llvm::ImmutableMap<const NamedDecl*, unsigned> LocalVarContext; class LocalVariableMap; @@ -853,6 +903,7 @@ public: /// \brief Class which implements the core thread safety analysis routines. class ThreadSafetyAnalyzer { friend class BuildLockset; + friend class threadSafety::BeforeSet; llvm::BumpPtrAllocator Bpa; threadSafety::til::MemRegionRef Arena; @@ -864,9 +915,11 @@ class ThreadSafetyAnalyzer { FactManager FactMan; std::vector<CFGBlockInfo> BlockInfo; + BeforeSet* GlobalBeforeSet; + public: - ThreadSafetyAnalyzer(ThreadSafetyHandler &H) - : Arena(&Bpa), SxBuilder(Arena), Handler(H) {} + ThreadSafetyAnalyzer(ThreadSafetyHandler &H, BeforeSet* Bset) + : Arena(&Bpa), SxBuilder(Arena), Handler(H), GlobalBeforeSet(Bset) {} bool inCurrentScope(const CapabilityExpr &CapE); @@ -906,6 +959,134 @@ public: void runAnalysis(AnalysisDeclContext &AC); }; +} // namespace + +/// Process acquired_before and acquired_after attributes on Vd. +BeforeSet::BeforeInfo* BeforeSet::insertAttrExprs(const ValueDecl* Vd, + ThreadSafetyAnalyzer& Analyzer) { + // Create a new entry for Vd. 
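// ---------------------------------------------------------------------------
// Sketch of the ordering check BeforeSet sets up: a map from each mutex to
// the mutexes it must be acquired before, walked depth-first with a
// three-state mark (0 = fresh, 1 = on the current path, 2 = done) so cycles
// in the acquired_before/acquired_after graph are detected. All names below
// are illustrative stand-ins; the patch resets the marks after each query so
// every lock site is re-checked, which is omitted here.
#include <map>
#include <string>
#include <vector>

struct BeforeGraph {
  std::map<std::string, std::vector<std::string>> MustComeBefore;
  std::map<std::string, int> Visited;

  bool hasCycleFrom(const std::string &M) {
    int &State = Visited[M]; // std::map references survive later insertions
    if (State == 1)
      return true;  // back-edge: an acquired_before/after ordering cycle
    if (State == 2)
      return false; // already fully explored
    State = 1;
    bool Cycle = false;
    for (const std::string &Next : MustComeBefore[M])
      Cycle |= hasCycleFrom(Next);
    State = 2;
    return Cycle;
  }
};
// ---------------------------------------------------------------------------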
+ auto& Entry = BMap.FindAndConstruct(Vd); + BeforeInfo* Info = &Entry.second; + BeforeVect* Bv = nullptr; + + for (Attr* At : Vd->attrs()) { + switch (At->getKind()) { + case attr::AcquiredBefore: { + auto *A = cast<AcquiredBeforeAttr>(At); + + // Create a new BeforeVect for Vd if necessary. + if (!Bv) { + Bv = new BeforeVect; + Info->Vect.reset(Bv); + } + // Read exprs from the attribute, and add them to BeforeVect. + for (const auto *Arg : A->args()) { + CapabilityExpr Cp = + Analyzer.SxBuilder.translateAttrExpr(Arg, nullptr); + if (const ValueDecl *Cpvd = Cp.valueDecl()) { + Bv->push_back(Cpvd); + auto It = BMap.find(Cpvd); + if (It == BMap.end()) + insertAttrExprs(Cpvd, Analyzer); + } + } + break; + } + case attr::AcquiredAfter: { + auto *A = cast<AcquiredAfterAttr>(At); + + // Read exprs from the attribute, and add them to BeforeVect. + for (const auto *Arg : A->args()) { + CapabilityExpr Cp = + Analyzer.SxBuilder.translateAttrExpr(Arg, nullptr); + if (const ValueDecl *ArgVd = Cp.valueDecl()) { + // Get entry for mutex listed in attribute + BeforeInfo* ArgInfo; + auto It = BMap.find(ArgVd); + if (It == BMap.end()) + ArgInfo = insertAttrExprs(ArgVd, Analyzer); + else + ArgInfo = &It->second; + + // Create a new BeforeVect if necessary. + BeforeVect* ArgBv = ArgInfo->Vect.get(); + if (!ArgBv) { + ArgBv = new BeforeVect; + ArgInfo->Vect.reset(ArgBv); + } + ArgBv->push_back(Vd); + } + } + break; + } + default: + break; + } + } + + return Info; +} + + +/// Return true if any mutexes in FSet are in the acquired_before set of Vd. +void BeforeSet::checkBeforeAfter(const ValueDecl* StartVd, + const FactSet& FSet, + ThreadSafetyAnalyzer& Analyzer, + SourceLocation Loc, StringRef CapKind) { + SmallVector<BeforeInfo*, 8> InfoVect; + + // Do a depth-first traversal of Vd. + // Return true if there are cycles. + std::function<bool (const ValueDecl*)> traverse = [&](const ValueDecl* Vd) { + if (!Vd) + return false; + + BeforeSet::BeforeInfo* Info; + auto It = BMap.find(Vd); + if (It == BMap.end()) + Info = insertAttrExprs(Vd, Analyzer); + else + Info = &It->second; + + if (Info->Visited == 1) + return true; + + if (Info->Visited == 2) + return false; + + BeforeVect* Bv = Info->Vect.get(); + if (!Bv) + return false; + + InfoVect.push_back(Info); + Info->Visited = 1; + for (auto *Vdb : *Bv) { + // Exclude mutexes in our immediate before set. + if (FSet.containsMutexDecl(Analyzer.FactMan, Vdb)) { + StringRef L1 = StartVd->getName(); + StringRef L2 = Vdb->getName(); + Analyzer.Handler.handleLockAcquiredBefore(CapKind, L1, L2, Loc); + } + // Transitively search other before sets, and warn on cycles. + if (traverse(Vdb)) { + if (CycMap.find(Vd) == CycMap.end()) { + CycMap.insert(std::make_pair(Vd, true)); + StringRef L1 = Vd->getName(); + Analyzer.Handler.handleBeforeAfterCycle(L1, Vd->getLocation()); + } + } + } + Info->Visited = 2; + return false; + }; + + traverse(StartVd); + + for (auto* Info : InfoVect) + Info->Visited = 0; +} + + /// \brief Gets the value decl pointer from DeclRefExprs or MemberExprs. 
static const ValueDecl *getValueDecl(const Expr *Exp) { @@ -921,6 +1102,7 @@ static const ValueDecl *getValueDecl(const Expr *Exp) { return nullptr; } +namespace { template <typename Ty> class has_arg_iterator_range { typedef char yes[1]; @@ -935,6 +1117,7 @@ class has_arg_iterator_range { public: static const bool value = sizeof(test<Ty>(nullptr)) == sizeof(yes); }; +} // namespace static StringRef ClassifyDiagnostic(const CapabilityAttr *A) { return A->getName(); @@ -1020,7 +1203,13 @@ void ThreadSafetyAnalyzer::addLock(FactSet &FSet, } } - // FIXME: deal with acquired before/after annotations. + // Check before/after constraints + if (Handler.issueBetaWarnings() && + !Entry->asserted() && !Entry->declared()) { + GlobalBeforeSet->checkBeforeAfter(Entry->valueDecl(), FSet, *this, + Entry->loc(), DiagKind); + } + // FIXME: Don't always warn when we have support for reentrant locks. if (FSet.findLock(FactMan, *Entry)) { if (!Entry->asserted()) @@ -1119,8 +1308,7 @@ void ThreadSafetyAnalyzer::getMutexIDs(CapExprSet &Mtxs, AttrType *Attr, } } - -bool getStaticBooleanValue(Expr* E, bool& TCond) { +static bool getStaticBooleanValue(Expr *E, bool &TCond) { if (isa<CXXNullPtrLiteralExpr>(E) || isa<GNUNullExpr>(E)) { TCond = false; return true; @@ -1230,7 +1418,7 @@ void ThreadSafetyAnalyzer::getEdgeLockset(FactSet& Result, CapExprSet SharedLocksToAdd; // If the condition is a call to a Trylock function, then grab the attributes - for (auto *Attr : FunDecl->getAttrs()) { + for (auto *Attr : FunDecl->attrs()) { switch (Attr->getKind()) { case attr::ExclusiveTrylockFunction: { ExclusiveTrylockFunctionAttr *A = @@ -1265,6 +1453,7 @@ void ThreadSafetyAnalyzer::getEdgeLockset(FactSet& Result, CapDiagKind); } +namespace { /// \brief We use this class to visit different types of expressions in /// CFGBlocks, and build up the lockset. /// An expression may cause us to add or remove locks from the lockset, or else @@ -1308,7 +1497,7 @@ public: void VisitCXXConstructExpr(CXXConstructExpr *Exp); void VisitDeclStmt(DeclStmt *S); }; - +} // namespace /// \brief Warn if the LSet does not contain a lock sufficient to protect access /// of at least the passed in AccessKind. @@ -1500,13 +1689,23 @@ void BuildLockset::checkPtAccess(const Expr *Exp, AccessKind AK, /// void BuildLockset::handleCall(Expr *Exp, const NamedDecl *D, VarDecl *VD) { SourceLocation Loc = Exp->getExprLoc(); - const AttrVec &ArgAttrs = D->getAttrs(); CapExprSet ExclusiveLocksToAdd, SharedLocksToAdd; CapExprSet ExclusiveLocksToRemove, SharedLocksToRemove, GenericLocksToRemove; + CapExprSet ScopedExclusiveReqs, ScopedSharedReqs; StringRef CapDiagKind = "mutex"; - for(unsigned i = 0; i < ArgAttrs.size(); ++i) { - Attr *At = const_cast<Attr*>(ArgAttrs[i]); + // Figure out if we're calling the constructor of scoped lockable class + bool isScopedVar = false; + if (VD) { + if (const CXXConstructorDecl *CD = dyn_cast<const CXXConstructorDecl>(D)) { + const CXXRecordDecl* PD = CD->getParent(); + if (PD && PD->hasAttr<ScopedLockableAttr>()) + isScopedVar = true; + } + } + + for(Attr *Atconst : D->attrs()) { + Attr* At = const_cast<Attr*>(Atconst); switch (At->getKind()) { // When we encounter a lock function, we need to add the lock to our // lockset. @@ -1564,10 +1763,17 @@ void BuildLockset::handleCall(Expr *Exp, const NamedDecl *D, VarDecl *VD) { case attr::RequiresCapability: { RequiresCapabilityAttr *A = cast<RequiresCapabilityAttr>(At); - for (auto *Arg : A->args()) + for (auto *Arg : A->args()) { warnIfMutexNotHeld(D, Exp, A->isShared() ? 
AK_Read : AK_Written, Arg, POK_FunctionCall, ClassifyDiagnostic(A), Exp->getExprLoc()); + // use for adopting a lock + if (isScopedVar) { + Analyzer->getMutexIDs(A->isShared() ? ScopedSharedReqs + : ScopedExclusiveReqs, + A, Exp, D, VD); + } + } break; } @@ -1584,16 +1790,6 @@ void BuildLockset::handleCall(Expr *Exp, const NamedDecl *D, VarDecl *VD) { } } - // Figure out if we're calling the constructor of scoped lockable class - bool isScopedVar = false; - if (VD) { - if (const CXXConstructorDecl *CD = dyn_cast<const CXXConstructorDecl>(D)) { - const CXXRecordDecl* PD = CD->getParent(); - if (PD && PD->hasAttr<ScopedLockableAttr>()) - isScopedVar = true; - } - } - // Add locks. for (const auto &M : ExclusiveLocksToAdd) Analyzer->addLock(FSet, llvm::make_unique<LockableFactEntry>( @@ -1611,9 +1807,10 @@ void BuildLockset::handleCall(Expr *Exp, const NamedDecl *D, VarDecl *VD) { // FIXME: does this store a pointer to DRE? CapabilityExpr Scp = Analyzer->SxBuilder.translateAttrExpr(&DRE, nullptr); - CapExprSet UnderlyingMutexes(ExclusiveLocksToAdd); - std::copy(SharedLocksToAdd.begin(), SharedLocksToAdd.end(), - std::back_inserter(UnderlyingMutexes)); + std::copy(ScopedExclusiveReqs.begin(), ScopedExclusiveReqs.end(), + std::back_inserter(ExclusiveLocksToAdd)); + std::copy(ScopedSharedReqs.begin(), ScopedSharedReqs.end(), + std::back_inserter(SharedLocksToAdd)); Analyzer->addLock(FSet, llvm::make_unique<ScopedLockableFactEntry>( Scp, MLoc, ExclusiveLocksToAdd, SharedLocksToAdd), @@ -1863,7 +2060,7 @@ void ThreadSafetyAnalyzer::intersectAndWarn(FactSet &FSet1, // Return true if block B never continues to its successors. -inline bool neverReturns(const CFGBlock* B) { +static bool neverReturns(const CFGBlock *B) { if (B->hasNoReturnElement()) return true; if (B->empty()) @@ -1940,14 +2137,13 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) { if (!SortedGraph->empty() && D->hasAttrs()) { const CFGBlock *FirstBlock = *SortedGraph->begin(); FactSet &InitialLockset = BlockInfo[FirstBlock->getBlockID()].EntrySet; - const AttrVec &ArgAttrs = D->getAttrs(); CapExprSet ExclusiveLocksToAdd; CapExprSet SharedLocksToAdd; StringRef CapDiagKind = "mutex"; SourceLocation Loc = D->getLocation(); - for (const auto *Attr : ArgAttrs) { + for (const auto *Attr : D->attrs()) { Loc = Attr->getLocation(); if (const auto *A = dyn_cast<RequiresCapabilityAttr>(Attr)) { getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A, @@ -1979,14 +2175,16 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) { } // FIXME -- Loc can be wrong here. 
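// ---------------------------------------------------------------------------
// What the new before/after checking catches, written as user code. Mutex is
// a stand-in capability type; the attribute spellings are Clang's documented
// -Wthread-safety ones, and per the addLock hunk above the check fires only
// under beta warnings (issueBetaWarnings(), i.e. -Wthread-safety-beta).
struct __attribute__((capability("mutex"))) Mutex {
  void lock() __attribute__((acquire_capability()));
  void unlock() __attribute__((release_capability()));
};

Mutex A;
Mutex B __attribute__((acquired_after(A))); // A must be locked before B

void wrongOrder() {
  B.lock();
  A.lock();   // expected warning: 'A' must be acquired before 'B'
  A.unlock();
  B.unlock();
}
// ---------------------------------------------------------------------------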
- for (const auto &Mu : ExclusiveLocksToAdd) - addLock(InitialLockset, - llvm::make_unique<LockableFactEntry>(Mu, LK_Exclusive, Loc), - CapDiagKind, true); - for (const auto &Mu : SharedLocksToAdd) - addLock(InitialLockset, - llvm::make_unique<LockableFactEntry>(Mu, LK_Shared, Loc), - CapDiagKind, true); + for (const auto &Mu : ExclusiveLocksToAdd) { + auto Entry = llvm::make_unique<LockableFactEntry>(Mu, LK_Exclusive, Loc); + Entry->setDeclared(true); + addLock(InitialLockset, std::move(Entry), CapDiagKind, true); + } + for (const auto &Mu : SharedLocksToAdd) { + auto Entry = llvm::make_unique<LockableFactEntry>(Mu, LK_Shared, Loc); + Entry->setDeclared(true); + addLock(InitialLockset, std::move(Entry), CapDiagKind, true); + } } for (const auto *CurrBlock : *SortedGraph) { @@ -2179,15 +2377,20 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) { /// We traverse the blocks in the CFG, compute the set of mutexes that are held /// at the end of each block, and issue warnings for thread safety violations. /// Each block in the CFG is traversed exactly once. -void runThreadSafetyAnalysis(AnalysisDeclContext &AC, - ThreadSafetyHandler &Handler) { - ThreadSafetyAnalyzer Analyzer(Handler); +void threadSafety::runThreadSafetyAnalysis(AnalysisDeclContext &AC, + ThreadSafetyHandler &Handler, + BeforeSet **BSet) { + if (!*BSet) + *BSet = new BeforeSet; + ThreadSafetyAnalyzer Analyzer(Handler, *BSet); Analyzer.runAnalysis(AC); } +void threadSafety::threadSafetyCleanup(BeforeSet *Cache) { delete Cache; } + /// \brief Helper function that returns a LockKind required for the given level /// of access. -LockKind getLockKindFromAccessKind(AccessKind AK) { +LockKind threadSafety::getLockKindFromAccessKind(AccessKind AK) { switch (AK) { case AK_Read : return LK_Shared; @@ -2196,5 +2399,3 @@ LockKind getLockKindFromAccessKind(AccessKind AK) { } llvm_unreachable("Unknown AccessKind"); } - -}} // end namespace clang::threadSafety diff --git a/contrib/llvm/tools/clang/lib/Analysis/ThreadSafetyCommon.cpp b/contrib/llvm/tools/clang/lib/Analysis/ThreadSafetyCommon.cpp index 563e059..d4b1ce2 100644 --- a/contrib/llvm/tools/clang/lib/Analysis/ThreadSafetyCommon.cpp +++ b/contrib/llvm/tools/clang/lib/Analysis/ThreadSafetyCommon.cpp @@ -31,13 +31,11 @@ #include <algorithm> #include <climits> #include <vector> - - -namespace clang { -namespace threadSafety { +using namespace clang; +using namespace threadSafety; // From ThreadSafetyUtil.h -std::string getSourceLiteralString(const clang::Expr *CE) { +std::string threadSafety::getSourceLiteralString(const clang::Expr *CE) { switch (CE->getStmtClass()) { case Stmt::IntegerLiteralClass: return cast<IntegerLiteral>(CE)->getValue().toString(10, true); @@ -59,18 +57,13 @@ std::string getSourceLiteralString(const clang::Expr *CE) { } } -namespace til { - // Return true if E is a variable that points to an incomplete Phi node. -static bool isIncompletePhi(const SExpr *E) { - if (const auto *Ph = dyn_cast<Phi>(E)) - return Ph->status() == Phi::PH_Incomplete; +static bool isIncompletePhi(const til::SExpr *E) { + if (const auto *Ph = dyn_cast<til::Phi>(E)) + return Ph->status() == til::Phi::PH_Incomplete; return false; } -} // end namespace til - - typedef SExprBuilder::CallingContext CallingContext; @@ -87,9 +80,7 @@ til::SCFG *SExprBuilder::buildCFG(CFGWalker &Walker) { return Scfg; } - - -inline bool isCalleeArrow(const Expr *E) { +static bool isCalleeArrow(const Expr *E) { const MemberExpr *ME = dyn_cast<MemberExpr>(E->IgnoreParenCasts()); return ME ? 
ME->isArrow() : false; } @@ -313,8 +304,7 @@ til::SExpr *SExprBuilder::translateCXXThisExpr(const CXXThisExpr *TE, return SelfVar; } - -const ValueDecl *getValueDeclFromSExpr(const til::SExpr *E) { +static const ValueDecl *getValueDeclFromSExpr(const til::SExpr *E) { if (auto *V = dyn_cast<til::Variable>(E)) return V->clangDecl(); if (auto *Ph = dyn_cast<til::Phi>(E)) @@ -326,7 +316,7 @@ const ValueDecl *getValueDeclFromSExpr(const til::SExpr *E) { return 0; } -bool hasCppPointerType(const til::SExpr *E) { +static bool hasCppPointerType(const til::SExpr *E) { auto *VD = getValueDeclFromSExpr(E); if (VD && VD->getType()->isPointerType()) return true; @@ -336,9 +326,8 @@ bool hasCppPointerType(const til::SExpr *E) { return false; } - // Grab the very first declaration of virtual method D -const CXXMethodDecl* getFirstVirtualDecl(const CXXMethodDecl *D) { +static const CXXMethodDecl *getFirstVirtualDecl(const CXXMethodDecl *D) { while (true) { D = D->getCanonicalDecl(); CXXMethodDecl::method_iterator I = D->begin_overridden_methods(), @@ -663,7 +652,7 @@ til::SExpr *SExprBuilder::lookupVarDecl(const ValueDecl *VD) { // if E is a til::Variable, update its clangDecl. -inline void maybeUpdateVD(til::SExpr *E, const ValueDecl *VD) { +static void maybeUpdateVD(til::SExpr *E, const ValueDecl *VD) { if (!E) return; if (til::Variable *V = dyn_cast<til::Variable>(E)) { @@ -986,8 +975,3 @@ void printSCFG(CFGWalker &Walker) { TILPrinter::print(Scfg, llvm::errs()); } */ - - -} // end namespace threadSafety - -} // end namespace clang diff --git a/contrib/llvm/tools/clang/lib/Analysis/ThreadSafetyTIL.cpp b/contrib/llvm/tools/clang/lib/Analysis/ThreadSafetyTIL.cpp index ebe374e..2923f7e6 100644 --- a/contrib/llvm/tools/clang/lib/Analysis/ThreadSafetyTIL.cpp +++ b/contrib/llvm/tools/clang/lib/Analysis/ThreadSafetyTIL.cpp @@ -9,13 +9,11 @@ #include "clang/Analysis/Analyses/ThreadSafetyTIL.h" #include "clang/Analysis/Analyses/ThreadSafetyTraverse.h" +using namespace clang; +using namespace threadSafety; +using namespace til; -namespace clang { -namespace threadSafety { -namespace til { - - -StringRef getUnaryOpcodeString(TIL_UnaryOpcode Op) { +StringRef til::getUnaryOpcodeString(TIL_UnaryOpcode Op) { switch (Op) { case UOP_Minus: return "-"; case UOP_BitNot: return "~"; @@ -24,8 +22,7 @@ StringRef getUnaryOpcodeString(TIL_UnaryOpcode Op) { return ""; } - -StringRef getBinaryOpcodeString(TIL_BinaryOpcode Op) { +StringRef til::getBinaryOpcodeString(TIL_BinaryOpcode Op) { switch (Op) { case BOP_Mul: return "*"; case BOP_Div: return "/"; @@ -82,7 +79,7 @@ void BasicBlock::reservePredecessors(unsigned NumPreds) { // If E is a variable, then trace back through any aliases or redundant // Phi nodes to find the canonical definition. -const SExpr *getCanonicalVal(const SExpr *E) { +const SExpr *til::getCanonicalVal(const SExpr *E) { while (true) { if (auto *V = dyn_cast<Variable>(E)) { if (V->kind() == Variable::VK_Let) { @@ -105,7 +102,7 @@ const SExpr *getCanonicalVal(const SExpr *E) { // If E is a variable, then trace back through any aliases or redundant // Phi nodes to find the canonical definition. // The non-const version will simplify incomplete Phi nodes. -SExpr *simplifyToCanonicalVal(SExpr *E) { +SExpr *til::simplifyToCanonicalVal(SExpr *E) { while (true) { if (auto *V = dyn_cast<Variable>(E)) { if (V->kind() != Variable::VK_Let) @@ -135,7 +132,7 @@ SExpr *simplifyToCanonicalVal(SExpr *E) { // Trace the arguments of an incomplete Phi node to see if they have the same // canonical definition. 
If so, mark the Phi node as redundant. // getCanonicalVal() will recursively call simplifyIncompletePhi(). -void simplifyIncompleteArg(til::Phi *Ph) { +void til::simplifyIncompleteArg(til::Phi *Ph) { assert(Ph && Ph->status() == Phi::PH_Incomplete); // eliminate infinite recursion -- assume that this node is not redundant. @@ -337,7 +334,3 @@ void SCFG::computeNormalForm() { computeNodeID(Block, &BasicBlock::PostDominatorNode); } } - -} // end namespace til -} // end namespace threadSafety -} // end namespace clang diff --git a/contrib/llvm/tools/clang/lib/Analysis/UninitializedValues.cpp b/contrib/llvm/tools/clang/lib/Analysis/UninitializedValues.cpp index 61a2592..f2f7919 100644 --- a/contrib/llvm/tools/clang/lib/Analysis/UninitializedValues.cpp +++ b/contrib/llvm/tools/clang/lib/Analysis/UninitializedValues.cpp @@ -14,6 +14,7 @@ #include "clang/AST/ASTContext.h" #include "clang/AST/Attr.h" #include "clang/AST/Decl.h" +#include "clang/AST/DeclCXX.h" #include "clang/AST/StmtVisitor.h" #include "clang/Analysis/Analyses/PostOrderCFGView.h" #include "clang/Analysis/Analyses/UninitializedValues.h" @@ -35,9 +36,9 @@ using namespace clang; static bool isTrackedVar(const VarDecl *vd, const DeclContext *dc) { if (vd->isLocalVarDecl() && !vd->hasGlobalStorage() && !vd->isExceptionVariable() && !vd->isInitCapture() && - vd->getDeclContext() == dc) { + !vd->isImplicit() && vd->getDeclContext() == dc) { QualType ty = vd->getType(); - return ty->isScalarType() || ty->isVectorType(); + return ty->isScalarType() || ty->isVectorType() || ty->isRecordType(); } return false; } @@ -347,6 +348,7 @@ public: } static const DeclRefExpr *getSelfInitExpr(VarDecl *VD) { + if (VD->getType()->isRecordType()) return nullptr; if (Expr *Init = VD->getInit()) { const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(stripCasts(VD->getASTContext(), Init)); @@ -376,10 +378,26 @@ void ClassifyRefs::classify(const Expr *E, Class C) { return; } + if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) { + if (VarDecl *VD = dyn_cast<VarDecl>(ME->getMemberDecl())) { + if (!VD->isStaticDataMember()) + classify(ME->getBase(), C); + } + return; + } + if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) { - if (BO->getOpcode() == BO_Comma) + switch (BO->getOpcode()) { + case BO_PtrMemD: + case BO_PtrMemI: + classify(BO->getLHS(), C); + return; + case BO_Comma: classify(BO->getRHS(), C); - return; + return; + default: + return; + } } FindVarResult Var = findVar(E, DC); @@ -404,7 +422,7 @@ void ClassifyRefs::VisitBinaryOperator(BinaryOperator *BO) { // use. if (BO->isCompoundAssignmentOp()) classify(BO->getLHS(), Use); - else if (BO->getOpcode() == BO_Assign) + else if (BO->getOpcode() == BO_Assign || BO->getOpcode() == BO_Comma) classify(BO->getLHS(), Ignore); } @@ -415,25 +433,40 @@ void ClassifyRefs::VisitUnaryOperator(UnaryOperator *UO) { classify(UO->getSubExpr(), Use); } +static bool isPointerToConst(const QualType &QT) { + return QT->isAnyPointerType() && QT->getPointeeType().isConstQualified(); +} + void ClassifyRefs::VisitCallExpr(CallExpr *CE) { // Classify arguments to std::move as used. if (CE->getNumArgs() == 1) { if (FunctionDecl *FD = CE->getDirectCallee()) { if (FD->isInStdNamespace() && FD->getIdentifier() && FD->getIdentifier()->isStr("move")) { - classify(CE->getArg(0), Use); + // RecordTypes are handled in SemaDeclCXX.cpp. 
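// ---------------------------------------------------------------------------
// A sketch of the behavior the UninitializedValues hunks above aim at, as
// user code: record-type locals are now tracked, so member access through an
// uninitialized struct can be flagged, while passing the object by const
// pointer is conservatively treated as neither an init nor a use.
// Illustrative only; exact diagnostics depend on the surrounding analysis.
struct Point { int x, y; };

int useBeforeInit() {
  Point p;
  return p.x; // intended -Wuninitialized hit: 'p' used while uninitialized
}

void observe(const Point *);

void constPtrIsNotAUse() {
  Point p;
  observe(&p); // const-pointer argument: classified Ignore, no warning
}
// ---------------------------------------------------------------------------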
+ if (!CE->getArg(0)->getType()->isRecordType()) + classify(CE->getArg(0), Use); return; } } } - // If a value is passed by const reference to a function, we should not assume - // that it is initialized by the call, and we conservatively do not assume - // that it is used. + // If a value is passed by const pointer or by const reference to a function, + // we should not assume that it is initialized by the call, and we + // conservatively do not assume that it is used. for (CallExpr::arg_iterator I = CE->arg_begin(), E = CE->arg_end(); - I != E; ++I) - if ((*I)->getType().isConstQualified() && (*I)->isGLValue()) - classify(*I, Ignore); + I != E; ++I) { + if ((*I)->isGLValue()) { + if ((*I)->getType().isConstQualified()) + classify((*I), Ignore); + } else if (isPointerToConst((*I)->getType())) { + const Expr *Ex = stripCasts(DC->getParentASTContext(), *I); + const UnaryOperator *UO = dyn_cast<UnaryOperator>(Ex); + if (UO && UO->getOpcode() == UO_AddrOf) + Ex = UO->getSubExpr(); + classify(Ex, Ignore); + } + } } void ClassifyRefs::VisitCastExpr(CastExpr *CE) { @@ -804,7 +837,7 @@ struct PruneBlocksHandler : public UninitVariablesHandler { : hadUse(numBlocks, false), hadAnyUse(false), currentBlock(0) {} - virtual ~PruneBlocksHandler() {} + ~PruneBlocksHandler() override {} /// Records if a CFGBlock had a potential use of an uninitialized variable. llvm::BitVector hadUse; diff --git a/contrib/llvm/tools/clang/lib/Basic/Diagnostic.cpp b/contrib/llvm/tools/clang/lib/Basic/Diagnostic.cpp index 83228ad..631b978 100644 --- a/contrib/llvm/tools/clang/lib/Basic/Diagnostic.cpp +++ b/contrib/llvm/tools/clang/lib/Basic/Diagnostic.cpp @@ -633,7 +633,8 @@ FormatDiagnostic(const char *DiagStr, const char *DiagEnd, // When the diagnostic string is only "%0", the entire string is being given // by an outside source. Remove unprintable characters from this string // and skip all the other string processing. - if (DiagEnd - DiagStr == 2 && DiagStr[0] == '%' && DiagStr[1] == '0' && + if (DiagEnd - DiagStr == 2 && + StringRef(DiagStr, DiagEnd - DiagStr).equals("%0") && getArgKind(0) == DiagnosticsEngine::ak_std_string) { const std::string &S = getArgStdStr(0); for (char c : S) { @@ -948,14 +949,8 @@ StoredDiagnostic::StoredDiagnostic(DiagnosticsEngine::Level Level, SmallString<64> Message; Info.FormatDiagnostic(Message); this->Message.assign(Message.begin(), Message.end()); - - Ranges.reserve(Info.getNumRanges()); - for (unsigned I = 0, N = Info.getNumRanges(); I != N; ++I) - Ranges.push_back(Info.getRange(I)); - - FixIts.reserve(Info.getNumFixItHints()); - for (unsigned I = 0, N = Info.getNumFixItHints(); I != N; ++I) - FixIts.push_back(Info.getFixItHint(I)); + this->Ranges.assign(Info.getRanges().begin(), Info.getRanges().end()); + this->FixIts.assign(Info.getFixItHints().begin(), Info.getFixItHints().end()); } StoredDiagnostic::StoredDiagnostic(DiagnosticsEngine::Level Level, unsigned ID, diff --git a/contrib/llvm/tools/clang/lib/Basic/DiagnosticIDs.cpp b/contrib/llvm/tools/clang/lib/Basic/DiagnosticIDs.cpp index 1c68375..643503b 100644 --- a/contrib/llvm/tools/clang/lib/Basic/DiagnosticIDs.cpp +++ b/contrib/llvm/tools/clang/lib/Basic/DiagnosticIDs.cpp @@ -528,7 +528,7 @@ static bool getDiagnosticsInGroup(diag::Flavor Flavor, // An empty group is considered to be a warning group: we have empty groups // for GCC compatibility, and GCC does not have remarks. if (!Group->Members && !Group->SubGroups) - return Flavor == diag::Flavor::Remark ? 
true : false; + return Flavor == diag::Flavor::Remark; bool NotFound = true; diff --git a/contrib/llvm/tools/clang/lib/Basic/FileManager.cpp b/contrib/llvm/tools/clang/lib/Basic/FileManager.cpp index 214e0f3..c46e2c7 100644 --- a/contrib/llvm/tools/clang/lib/Basic/FileManager.cpp +++ b/contrib/llvm/tools/clang/lib/Basic/FileManager.cpp @@ -430,7 +430,7 @@ FileManager::getBufferForFile(const FileEntry *Entry, bool isVolatile, SmallString<128> FilePath(Entry->getName()); FixupRelativePath(FilePath); - return FS->getBufferForFile(FilePath.str(), FileSize, + return FS->getBufferForFile(FilePath, FileSize, /*RequiresNullTerminator=*/true, isVolatile); } diff --git a/contrib/llvm/tools/clang/lib/Basic/IdentifierTable.cpp b/contrib/llvm/tools/clang/lib/Basic/IdentifierTable.cpp index 613b43f..e830be9 100644 --- a/contrib/llvm/tools/clang/lib/Basic/IdentifierTable.cpp +++ b/contrib/llvm/tools/clang/lib/Basic/IdentifierTable.cpp @@ -35,7 +35,7 @@ IdentifierInfo::IdentifierInfo() { HasMacro = false; HadMacro = false; IsExtension = false; - IsCXX11CompatKeyword = false; + IsFutureCompatKeyword = false; IsPoisoned = false; IsCPPOperatorKeyword = false; NeedsHandleIdentifier = false; @@ -105,10 +105,12 @@ namespace { KEYOPENCL = 0x200, KEYC11 = 0x400, KEYARC = 0x800, - KEYNOMS = 0x01000, - WCHARSUPPORT = 0x02000, - HALFSUPPORT = 0x04000, - KEYALL = (0xffff & ~KEYNOMS) // Because KEYNOMS is used to exclude. + KEYNOMS18 = 0x01000, + KEYNOOPENCL = 0x02000, + WCHARSUPPORT = 0x04000, + HALFSUPPORT = 0x08000, + KEYALL = (0xffff & ~KEYNOMS18 & + ~KEYNOOPENCL) // KEYNOMS18 and KEYNOOPENCL are used to exclude. }; /// \brief How a keyword is treated in the selected standard. @@ -154,15 +156,21 @@ static void AddKeyword(StringRef Keyword, KeywordStatus AddResult = getKeywordStatus(LangOpts, Flags); // Don't add this keyword under MSVCCompat. - if (LangOpts.MSVCCompat && (Flags & KEYNOMS)) - return; + if (LangOpts.MSVCCompat && (Flags & KEYNOMS18) && + !LangOpts.isCompatibleWithMSVC(LangOptions::MSVC2015)) + return; + + // Don't add this keyword under OpenCL. + if (LangOpts.OpenCL && (Flags & KEYNOOPENCL)) + return; + // Don't add this keyword if disabled in this language. if (AddResult == KS_Disabled) return; IdentifierInfo &Info = Table.get(Keyword, AddResult == KS_Future ? tok::identifier : TokenCode); Info.setIsExtensionToken(AddResult == KS_Extension); - Info.setIsCXX11CompatKeyword(AddResult == KS_Future); + Info.setIsFutureCompatKeyword(AddResult == KS_Future); } /// AddCXXOperatorKeyword - Register a C++ operator keyword alternative @@ -207,6 +215,12 @@ void IdentifierTable::AddKeywords(const LangOptions &LangOpts) { if (LangOpts.ParseUnknownAnytype) AddKeyword("__unknown_anytype", tok::kw___unknown_anytype, KEYALL, LangOpts, *this); + + // FIXME: __declspec isn't really a CUDA extension, however it is required for + // supporting cuda_builtin_vars.h, which uses __declspec(property). Once that + // has been rewritten in terms of something more generic, remove this code. 
+ if (LangOpts.CUDA) + AddKeyword("__declspec", tok::kw___declspec, KEYALL, LangOpts, *this); } /// \brief Checks if the specified token kind represents a keyword in the diff --git a/contrib/llvm/tools/clang/lib/Basic/LangOptions.cpp b/contrib/llvm/tools/clang/lib/Basic/LangOptions.cpp index dcbd228..2c87845 100644 --- a/contrib/llvm/tools/clang/lib/Basic/LangOptions.cpp +++ b/contrib/llvm/tools/clang/lib/Basic/LangOptions.cpp @@ -30,7 +30,7 @@ void LangOptions::resetNonModularOptions() { // FIXME: This should not be reset; modules can be different with different // sanitizer options (this affects __has_feature(address_sanitizer) etc). Sanitize.clear(); - SanitizerBlacklistFile.clear(); + SanitizerBlacklistFiles.clear(); CurrentModule.clear(); ImplementationOfModule.clear(); diff --git a/contrib/llvm/tools/clang/lib/Basic/Module.cpp b/contrib/llvm/tools/clang/lib/Basic/Module.cpp index 03f9bd3..7308665 100644 --- a/contrib/llvm/tools/clang/lib/Basic/Module.cpp +++ b/contrib/llvm/tools/clang/lib/Basic/Module.cpp @@ -25,14 +25,14 @@ using namespace clang; Module::Module(StringRef Name, SourceLocation DefinitionLoc, Module *Parent, - bool IsFramework, bool IsExplicit) + bool IsFramework, bool IsExplicit, unsigned VisibilityID) : Name(Name), DefinitionLoc(DefinitionLoc), Parent(Parent), Directory(), - Umbrella(), ASTFile(nullptr), IsMissingRequirement(false), - IsAvailable(true), IsFromModuleFile(false), IsFramework(IsFramework), - IsExplicit(IsExplicit), IsSystem(false), IsExternC(false), - IsInferred(false), InferSubmodules(false), InferExplicitSubmodules(false), - InferExportWildcard(false), ConfigMacrosExhaustive(false), - NameVisibility(Hidden) { + Umbrella(), ASTFile(nullptr), VisibilityID(VisibilityID), + IsMissingRequirement(false), IsAvailable(true), IsFromModuleFile(false), + IsFramework(IsFramework), IsExplicit(IsExplicit), IsSystem(false), + IsExternC(false), IsInferred(false), InferSubmodules(false), + InferExplicitSubmodules(false), InferExportWildcard(false), + ConfigMacrosExhaustive(false), NameVisibility(Hidden) { if (Parent) { if (!Parent->isAvailable()) IsAvailable = false; @@ -58,16 +58,21 @@ Module::~Module() { /// language options has the given feature. 
static bool hasFeature(StringRef Feature, const LangOptions &LangOpts, const TargetInfo &Target) { - return llvm::StringSwitch<bool>(Feature) - .Case("altivec", LangOpts.AltiVec) - .Case("blocks", LangOpts.Blocks) - .Case("cplusplus", LangOpts.CPlusPlus) - .Case("cplusplus11", LangOpts.CPlusPlus11) - .Case("objc", LangOpts.ObjC1) - .Case("objc_arc", LangOpts.ObjCAutoRefCount) - .Case("opencl", LangOpts.OpenCL) - .Case("tls", Target.isTLSSupported()) - .Default(Target.hasFeature(Feature)); + bool HasFeature = llvm::StringSwitch<bool>(Feature) + .Case("altivec", LangOpts.AltiVec) + .Case("blocks", LangOpts.Blocks) + .Case("cplusplus", LangOpts.CPlusPlus) + .Case("cplusplus11", LangOpts.CPlusPlus11) + .Case("objc", LangOpts.ObjC1) + .Case("objc_arc", LangOpts.ObjCAutoRefCount) + .Case("opencl", LangOpts.OpenCL) + .Case("tls", Target.isTLSSupported()) + .Default(Target.hasFeature(Feature)); + if (!HasFeature) + HasFeature = std::find(LangOpts.ModuleFeatures.begin(), + LangOpts.ModuleFeatures.end(), + Feature) != LangOpts.ModuleFeatures.end(); + return HasFeature; } bool Module::isAvailable(const LangOptions &LangOpts, const TargetInfo &Target, @@ -133,11 +138,11 @@ std::string Module::getFullModuleName() const { return Result; } -const DirectoryEntry *Module::getUmbrellaDir() const { - if (const FileEntry *Header = getUmbrellaHeader()) - return Header->getDir(); +Module::DirectoryName Module::getUmbrellaDir() const { + if (Header U = getUmbrellaHeader()) + return {"", U.Entry->getDir()}; - return Umbrella.dyn_cast<const DirectoryEntry *>(); + return {UmbrellaAsWritten, Umbrella.dyn_cast<const DirectoryEntry *>()}; } ArrayRef<const FileEntry *> Module::getTopHeaders(FileManager &FileMgr) { @@ -153,6 +158,19 @@ ArrayRef<const FileEntry *> Module::getTopHeaders(FileManager &FileMgr) { return llvm::makeArrayRef(TopHeaders.begin(), TopHeaders.end()); } +bool Module::directlyUses(const Module *Requested) const { + auto *Top = getTopLevelModule(); + + // A top-level module implicitly uses itself. + if (Requested->isSubModuleOf(Top)) + return true; + + for (auto *Use : Top->DirectUses) + if (Requested->isSubModuleOf(Use)) + return true; + return false; +} + void Module::addRequirement(StringRef Feature, bool RequiredState, const LangOptions &LangOpts, const TargetInfo &Target) { @@ -316,15 +334,15 @@ void Module::print(raw_ostream &OS, unsigned Indent) const { OS << "\n"; } - if (const FileEntry *UmbrellaHeader = getUmbrellaHeader()) { + if (Header H = getUmbrellaHeader()) { OS.indent(Indent + 2); OS << "umbrella header \""; - OS.write_escaped(UmbrellaHeader->getName()); + OS.write_escaped(H.NameAsWritten); OS << "\"\n"; - } else if (const DirectoryEntry *UmbrellaDir = getUmbrellaDir()) { + } else if (DirectoryName D = getUmbrellaDir()) { OS.indent(Indent + 2); OS << "umbrella \""; - OS.write_escaped(UmbrellaDir->getName()); + OS.write_escaped(D.NameAsWritten); OS << "\"\n"; } @@ -457,4 +475,47 @@ void Module::dump() const { print(llvm::errs()); } +void VisibleModuleSet::setVisible(Module *M, SourceLocation Loc, + VisibleCallback Vis, ConflictCallback Cb) { + if (isVisible(M)) + return; + ++Generation; + + struct Visiting { + Module *M; + Visiting *ExportedBy; + }; + + std::function<void(Visiting)> VisitModule = [&](Visiting V) { + // Modules that aren't available cannot be made visible. + if (!V.M->isAvailable()) + return; + + // Nothing to do for a module that's already visible. 
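// ---------------------------------------------------------------------------
// Sketch of the visibility bookkeeping VisibleModuleSet::setVisible builds
// on: each module carries a small integer VisibilityID, "visible" means "has
// a valid entry in a vector indexed by that ID", and a generation counter
// lets cached visibility queries be invalidated cheaply. Stand-in types only;
// the patch stores SourceLocations and also walks exports and conflicts.
#include <vector>

struct DemoModule { unsigned VisibilityID; };

class DemoVisibleSet {
  std::vector<bool> Imported;
  unsigned Generation = 0;

public:
  bool isVisible(const DemoModule &M) const {
    return M.VisibilityID < Imported.size() && Imported[M.VisibilityID];
  }
  void setVisible(const DemoModule &M) {
    if (isVisible(M))
      return;
    ++Generation; // invalidates any generation-tagged caches
    if (Imported.size() <= M.VisibilityID)
      Imported.resize(M.VisibilityID + 1);
    Imported[M.VisibilityID] = true;
    // The real setVisible then recursively makes exported modules visible
    // and reports conflicts along the import path; omitted here.
  }
};
// ---------------------------------------------------------------------------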
+ unsigned ID = V.M->getVisibilityID(); + if (ImportLocs.size() <= ID) + ImportLocs.resize(ID + 1); + else if (ImportLocs[ID].isValid()) + return; + + ImportLocs[ID] = Loc; + Vis(V.M); + + // Make any exported modules visible. + SmallVector<Module *, 16> Exports; + V.M->getExportedModules(Exports); + for (Module *E : Exports) + VisitModule({E, &V}); + + for (auto &C : V.M->Conflicts) { + if (isVisible(C.Other)) { + llvm::SmallVector<Module*, 8> Path; + for (Visiting *I = &V; I; I = I->ExportedBy) + Path.push_back(I->M); + Cb(Path, C.Other, C.Message); + } + } + }; + VisitModule({M, nullptr}); +} diff --git a/contrib/llvm/tools/clang/lib/Basic/OpenMPKinds.cpp b/contrib/llvm/tools/clang/lib/Basic/OpenMPKinds.cpp index 6e98d48..b83a069 100644 --- a/contrib/llvm/tools/clang/lib/Basic/OpenMPKinds.cpp +++ b/contrib/llvm/tools/clang/lib/Basic/OpenMPKinds.cpp @@ -374,7 +374,6 @@ bool clang::isOpenMPPrivate(OpenMPClauseKind Kind) { } bool clang::isOpenMPThreadPrivate(OpenMPClauseKind Kind) { - return Kind == OMPC_threadprivate || - Kind == OMPC_copyin; // TODO add next clauses like 'copyprivate'. + return Kind == OMPC_threadprivate || Kind == OMPC_copyin; } diff --git a/contrib/llvm/tools/clang/lib/Basic/SanitizerBlacklist.cpp b/contrib/llvm/tools/clang/lib/Basic/SanitizerBlacklist.cpp index ea5b8d0..095fcd6 100644 --- a/contrib/llvm/tools/clang/lib/Basic/SanitizerBlacklist.cpp +++ b/contrib/llvm/tools/clang/lib/Basic/SanitizerBlacklist.cpp @@ -15,9 +15,9 @@ using namespace clang; -SanitizerBlacklist::SanitizerBlacklist(StringRef BlacklistPath, - SourceManager &SM) - : SCL(llvm::SpecialCaseList::createOrDie(BlacklistPath)), SM(SM) {} +SanitizerBlacklist::SanitizerBlacklist( + const std::vector<std::string> &BlacklistPaths, SourceManager &SM) + : SCL(llvm::SpecialCaseList::createOrDie(BlacklistPaths)), SM(SM) {} bool SanitizerBlacklist::isBlacklistedGlobal(StringRef GlobalName, StringRef Category) const { diff --git a/contrib/llvm/tools/clang/lib/Basic/Sanitizers.cpp b/contrib/llvm/tools/clang/lib/Basic/Sanitizers.cpp index e9aaa36..8c4884b 100644 --- a/contrib/llvm/tools/clang/lib/Basic/Sanitizers.cpp +++ b/contrib/llvm/tools/clang/lib/Basic/Sanitizers.cpp @@ -11,25 +11,48 @@ // //===----------------------------------------------------------------------===// #include "clang/Basic/Sanitizers.h" +#include "clang/Basic/LLVM.h" +#include "llvm/ADT/StringRef.h" +#include "llvm/ADT/StringSwitch.h" +#include "llvm/Support/MathExtras.h" using namespace clang; -SanitizerSet::SanitizerSet() : Kinds(0) {} +SanitizerSet::SanitizerSet() : Mask(0) {} -bool SanitizerSet::has(SanitizerKind K) const { - unsigned Bit = static_cast<unsigned>(K); - return Kinds & (1 << Bit); +bool SanitizerSet::has(SanitizerMask K) const { + assert(llvm::countPopulation(K) == 1); + return Mask & K; } -void SanitizerSet::set(SanitizerKind K, bool Value) { - unsigned Bit = static_cast<unsigned>(K); - Kinds = Value ? (Kinds | (1 << Bit)) : (Kinds & ~(1 << Bit)); +void SanitizerSet::set(SanitizerMask K, bool Value) { + assert(llvm::countPopulation(K) == 1); + Mask = Value ? (Mask | K) : (Mask & ~K); } void SanitizerSet::clear() { - Kinds = 0; + Mask = 0; } bool SanitizerSet::empty() const { - return Kinds == 0; + return Mask == 0; +} + +SanitizerMask clang::parseSanitizerValue(StringRef Value, bool AllowGroups) { + SanitizerMask ParsedKind = llvm::StringSwitch<SanitizerMask>(Value) +#define SANITIZER(NAME, ID) .Case(NAME, SanitizerKind::ID) +#define SANITIZER_GROUP(NAME, ID, ALIAS) \ + .Case(NAME, AllowGroups ?
SanitizerKind::ID##Group : 0) +#include "clang/Basic/Sanitizers.def" + .Default(0); + return ParsedKind; +} + +SanitizerMask clang::expandSanitizerGroups(SanitizerMask Kinds) { +#define SANITIZER(NAME, ID) +#define SANITIZER_GROUP(NAME, ID, ALIAS) \ + if (Kinds & SanitizerKind::ID##Group) \ + Kinds |= SanitizerKind::ID; +#include "clang/Basic/Sanitizers.def" + return Kinds; } diff --git a/contrib/llvm/tools/clang/lib/Basic/SourceManager.cpp b/contrib/llvm/tools/clang/lib/Basic/SourceManager.cpp index 118e3f3..c0b0453 100644 --- a/contrib/llvm/tools/clang/lib/Basic/SourceManager.cpp +++ b/contrib/llvm/tools/clang/lib/Basic/SourceManager.cpp @@ -110,8 +110,8 @@ llvm::MemoryBuffer *ContentCache::getBuffer(DiagnosticsEngine &Diag, // possible. if (!BufferOrError) { StringRef FillStr("<<<MISSING SOURCE FILE>>>\n"); - Buffer.setPointer(MemoryBuffer::getNewMemBuffer(ContentsEntry->getSize(), - "<invalid>").release()); + Buffer.setPointer(MemoryBuffer::getNewUninitMemBuffer( + ContentsEntry->getSize(), "<invalid>").release()); char *Ptr = const_cast<char*>(Buffer.getPointer()->getBufferStart()); for (unsigned i = 0, e = ContentsEntry->getSize(); i != e; ++i) Ptr[i] = FillStr[i % FillStr.size()]; diff --git a/contrib/llvm/tools/clang/lib/Basic/TargetInfo.cpp b/contrib/llvm/tools/clang/lib/Basic/TargetInfo.cpp index 871bbd5..330258b 100644 --- a/contrib/llvm/tools/clang/lib/Basic/TargetInfo.cpp +++ b/contrib/llvm/tools/clang/lib/Basic/TargetInfo.cpp @@ -36,6 +36,7 @@ TargetInfo::TargetInfo(const llvm::Triple &T) : TargetOpts(), Triple(T) { LongWidth = LongAlign = 32; LongLongWidth = LongLongAlign = 64; SuitableAlign = 64; + DefaultAlignForAttributeAligned = 128; MinGlobalAlign = 0; HalfWidth = 16; HalfAlign = 16; diff --git a/contrib/llvm/tools/clang/lib/Basic/Targets.cpp b/contrib/llvm/tools/clang/lib/Basic/Targets.cpp index a7a0057..a768081 100644 --- a/contrib/llvm/tools/clang/lib/Basic/Targets.cpp +++ b/contrib/llvm/tools/clang/lib/Basic/Targets.cpp @@ -25,9 +25,9 @@ #include "llvm/ADT/StringRef.h" #include "llvm/ADT/StringSwitch.h" #include "llvm/ADT/Triple.h" -#include "llvm/IR/Type.h" #include "llvm/MC/MCSectionMachO.h" #include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/TargetParser.h" #include <algorithm> #include <memory> using namespace clang; @@ -185,6 +185,28 @@ static void getDarwinDefines(MacroBuilder &Builder, const LangOptions &Opts, } namespace { +// CloudABI Target +template <typename Target> +class CloudABITargetInfo : public OSTargetInfo<Target> { +protected: + void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple, + MacroBuilder &Builder) const override { + Builder.defineMacro("__CloudABI__"); + Builder.defineMacro("__ELF__"); + + // CloudABI uses ISO/IEC 10646:2012 for wchar_t, char16_t and char32_t. 
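In the Sanitizers.cpp hunk above, SanitizerSet stops translating enum values into bit positions and stores a SanitizerMask directly, which is what lets group names parse to a group bit and expand to their members in a second step. A toy version of the same mask-and-group scheme; the bit assignments are invented, not clang's SanitizerKind values:

```cpp
#include <cstdint>
#include <string>

using Mask = uint64_t;
constexpr Mask Address   = 1ULL << 0;
constexpr Mask Alignment = 1ULL << 1;
constexpr Mask Bool      = 1ULL << 2;
constexpr Mask UBGroup   = 1ULL << 3; // plays the role of a group like "undefined"

static Mask parseValue(const std::string &Name, bool AllowGroups) {
  if (Name == "address")   return Address;
  if (Name == "alignment") return Alignment;
  if (Name == "bool")      return Bool;
  if (Name == "undefined") return AllowGroups ? UBGroup : 0;
  return 0;
}

static Mask expandGroups(Mask Kinds) {
  if (Kinds & UBGroup)
    Kinds |= Alignment | Bool; // a group bit pulls in all of its members
  return Kinds;
}
```

Back in Targets.cpp, the defines that follow pin down the CloudABI wide-character encodings announced in the comment above.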
+ Builder.defineMacro("__STDC_ISO_10646__", "201206L"); + Builder.defineMacro("__STDC_UTF_16__"); + Builder.defineMacro("__STDC_UTF_32__"); + } + +public: + CloudABITargetInfo(const llvm::Triple &Triple) + : OSTargetInfo<Target>(Triple) { + this->UserLabelPrefix = ""; + } +}; + template<typename Target> class DarwinTargetInfo : public OSTargetInfo<Target> { protected: @@ -364,8 +386,13 @@ protected: DefineStd(Builder, "linux", Opts); Builder.defineMacro("__gnu_linux__"); Builder.defineMacro("__ELF__"); - if (Triple.getEnvironment() == llvm::Triple::Android) + if (Triple.getEnvironment() == llvm::Triple::Android) { Builder.defineMacro("__ANDROID__", "1"); + unsigned Maj, Min, Rev; + Triple.getOSVersion(Maj, Min, Rev); + this->PlatformName = "android"; + this->PlatformMinVersion = VersionTuple(Maj, Min, Rev); + } if (Opts.POSIXThreads) Builder.defineMacro("_REENTRANT"); if (Opts.CPlusPlus) @@ -473,6 +500,17 @@ protected: Builder.defineMacro("__ELF__"); if (Opts.POSIXThreads) Builder.defineMacro("_REENTRANT"); + + switch (Triple.getArch()) { + default: + break; + case llvm::Triple::arm: + case llvm::Triple::armeb: + case llvm::Triple::thumb: + case llvm::Triple::thumbeb: + Builder.defineMacro("__ARM_DWARF_EH__"); + break; + } } public: BitrigTargetInfo(const llvm::Triple &Triple) : OSTargetInfo<Target>(Triple) { @@ -526,6 +564,33 @@ public: } }; +template <typename Target> +class PS4OSTargetInfo : public OSTargetInfo<Target> { +protected: + void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple, + MacroBuilder &Builder) const override { + Builder.defineMacro("__FreeBSD__", "9"); + Builder.defineMacro("__FreeBSD_cc_version", "900001"); + Builder.defineMacro("__KPRINTF_ATTRIBUTE__"); + DefineStd(Builder, "unix", Opts); + Builder.defineMacro("__ELF__"); + Builder.defineMacro("__PS4__"); + } +public: + PS4OSTargetInfo(const llvm::Triple &Triple) : OSTargetInfo<Target>(Triple) { + this->WCharType = this->UnsignedShort; + + this->UserLabelPrefix = ""; + + switch (Triple.getArch()) { + default: + case llvm::Triple::x86_64: + this->MCountName = ".mcount"; + break; + } + } +}; + // Solaris target template<typename Target> class SolarisTargetInfo : public OSTargetInfo<Target> { @@ -574,7 +639,7 @@ protected: if (Opts.RTTIData) Builder.defineMacro("_CPPRTTI"); - if (Opts.Exceptions) + if (Opts.CXXExceptions) Builder.defineMacro("_CPPUNWIND"); } @@ -592,6 +657,9 @@ protected: Builder.defineMacro("_MSC_FULL_VER", Twine(Opts.MSCompatibilityVersion)); // FIXME We cannot encode the revision information into 32-bits Builder.defineMacro("_MSC_BUILD", Twine(1)); + + if (Opts.CPlusPlus11 && Opts.isCompatibleWithMSVC(LangOptions::MSVC2015)) + Builder.defineMacro("_HAS_CHAR16_T_LANGUAGE_SUPPORT", Twine(1)); } if (Opts.MicrosoftExt) { @@ -647,8 +715,7 @@ public: // RegParmMax is inherited from the underlying architecture this->LongDoubleFormat = &llvm::APFloat::IEEEdouble; if (Triple.getArch() == llvm::Triple::arm) { - this->DescriptionString = - "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S128"; + // Handled in ARM's setABI(). } else if (Triple.getArch() == llvm::Triple::x86) { this->DescriptionString = "e-m:e-p:32:32-i64:64-n8:16:32-S128"; } else if (Triple.getArch() == llvm::Triple::x86_64) { @@ -660,19 +727,12 @@ public: this->DescriptionString = "e-p:32:32-i64:64"; } } - typename Target::CallingConvCheckResult checkCallingConvention( - CallingConv CC) const override { - return CC == CC_PnaclCall ? Target::CCCR_OK : - Target::checkCallingConvention(CC); - } }; -} // end anonymous namespace. 
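CloudABI earlier in this hunk and PS4 here both follow the standard Targets.cpp recipe: an OSTargetInfo<Target> mixin contributes getOSDefines() on top of whichever CPU target it wraps. A self-contained toy of that layering; every class and macro name here is an invented stand-in:

```cpp
#include <string>
#include <utility>
#include <vector>

struct MacroList {
  std::vector<std::string> Macros;
  void define(std::string M) { Macros.push_back(std::move(M)); }
};

// CPU layer: defines only architecture macros.
struct X86_64Toy {
  virtual ~X86_64Toy() = default;
  virtual void getTargetDefines(MacroList &B) const { B.define("__x86_64__"); }
};

// OS mixin: runs the CPU layer first, then adds OS macros on top.
template <typename Target> struct OSToy : Target {
  void getTargetDefines(MacroList &B) const override {
    Target::getTargetDefines(B);
    getOSDefines(B);
  }
  virtual void getOSDefines(MacroList &B) const = 0;
};

template <typename Target> struct PS4Toy : OSToy<Target> {
  void getOSDefines(MacroList &B) const override {
    B.define("__FreeBSD__"); // the PS4 hunk masquerades as FreeBSD 9
    B.define("__ELF__");
    B.define("__PS4__");
  }
};
```

Writing the OS layer as a template keeps one OS definition reusable across every CPU it runs on, which is why the PS4 class above needs no x86-specific code of its own.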
//===----------------------------------------------------------------------===// // Specific target implementations. //===----------------------------------------------------------------------===// -namespace { // PPC abstract base class class PPCTargetInfo : public TargetInfo { static const Builtin::Info BuiltinInfo[]; @@ -683,13 +743,21 @@ class PPCTargetInfo : public TargetInfo { // Target cpu features. bool HasVSX; bool HasP8Vector; + bool HasP8Crypto; + bool HasDirectMove; + bool HasQPX; + bool HasHTM; + bool HasBPERMD; + bool HasExtDiv; protected: std::string ABI; public: PPCTargetInfo(const llvm::Triple &Triple) - : TargetInfo(Triple), HasVSX(false), HasP8Vector(false) { + : TargetInfo(Triple), HasVSX(false), HasP8Vector(false), + HasP8Crypto(false), HasDirectMove(false), HasQPX(false), HasHTM(false), + HasBPERMD(false), HasExtDiv(false) { BigEndian = (Triple.getArch() != llvm::Triple::ppc64le); LongDoubleWidth = LongDoubleAlign = 128; LongDoubleFormat = &llvm::APFloat::PPCDoubleDouble; @@ -949,8 +1017,40 @@ bool PPCTargetInfo::handleTargetFeatures(std::vector<std::string> &Features, continue; } + if (Feature == "bpermd") { + HasBPERMD = true; + continue; + } + + if (Feature == "extdiv") { + HasExtDiv = true; + continue; + } + if (Feature == "power8-vector") { HasP8Vector = true; + HasVSX = true; + continue; + } + + if (Feature == "crypto") { + HasP8Crypto = true; + continue; + } + + if (Feature == "direct-move") { + HasDirectMove = true; + HasVSX = true; + continue; + } + + if (Feature == "qpx") { + HasQPX = true; + continue; + } + + if (Feature == "htm") { + HasHTM = true; continue; } @@ -988,7 +1088,7 @@ void PPCTargetInfo::getTargetDefines(const LangOptions &Opts, } // ABI options. - if (ABI == "elfv1") + if (ABI == "elfv1" || ABI == "elfv1-qpx") Builder.defineMacro("_CALL_ELF", "1"); if (ABI == "elfv2") Builder.defineMacro("_CALL_ELF", "2"); @@ -1106,6 +1206,18 @@ void PPCTargetInfo::getTargetDefines(const LangOptions &Opts, Builder.defineMacro("__VSX__"); if (HasP8Vector) Builder.defineMacro("__POWER8_VECTOR__"); + if (HasP8Crypto) + Builder.defineMacro("__CRYPTO__"); + if (HasHTM) + Builder.defineMacro("__HTM__"); + if (getTriple().getArch() == llvm::Triple::ppc64le || + (defs & ArchDefinePwr8) || (CPU == "pwr8")) { + Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1"); + Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2"); + Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4"); + if (PointerWidth == 64) + Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8"); + } // FIXME: The following are not yet generated here by Clang, but are // generated by GCC: @@ -1144,9 +1256,28 @@ void PPCTargetInfo::getDefaultFeatures(llvm::StringMap<bool> &Features) const { .Default(false); Features["qpx"] = (CPU == "a2q"); - - if (!ABI.empty()) - Features[ABI] = true; + Features["crypto"] = llvm::StringSwitch<bool>(CPU) + .Case("ppc64le", true) + .Case("pwr8", true) + .Default(false); + Features["power8-vector"] = llvm::StringSwitch<bool>(CPU) + .Case("ppc64le", true) + .Case("pwr8", true) + .Default(false); + Features["bpermd"] = llvm::StringSwitch<bool>(CPU) + .Case("ppc64le", true) + .Case("pwr8", true) + .Case("pwr7", true) + .Default(false); + Features["extdiv"] = llvm::StringSwitch<bool>(CPU) + .Case("ppc64le", true) + .Case("pwr8", true) + .Case("pwr7", true) + .Default(false); + Features["direct-move"] = llvm::StringSwitch<bool>(CPU) + .Case("ppc64le", true) + .Case("pwr8", true) + .Default(false); } bool PPCTargetInfo::hasFeature(StringRef Feature) const { @@ 
-1154,6 +1285,12 @@ bool PPCTargetInfo::hasFeature(StringRef Feature) const { .Case("powerpc", true) .Case("vsx", HasVSX) .Case("power8-vector", HasP8Vector) + .Case("crypto", HasP8Crypto) + .Case("direct-move", HasDirectMove) + .Case("qpx", HasQPX) + .Case("htm", HasHTM) + .Case("bpermd", HasBPERMD) + .Case("extdiv", HasExtDiv) .Default(false); } @@ -1259,9 +1396,7 @@ void PPCTargetInfo::getGCCRegAliases(const GCCRegAlias *&Aliases, Aliases = GCCRegAliases; NumAliases = llvm::array_lengthof(GCCRegAliases); } -} // end anonymous namespace. -namespace { class PPC32TargetInfo : public PPCTargetInfo { public: PPC32TargetInfo(const llvm::Triple &Triple) : PPCTargetInfo(Triple) { @@ -1293,11 +1428,9 @@ public: return TargetInfo::PowerABIBuiltinVaList; } }; -} // end anonymous namespace. // Note: ABI differences may eventually require us to have a separate // TargetInfo for little endian. -namespace { class PPC64TargetInfo : public PPCTargetInfo { public: PPC64TargetInfo(const llvm::Triple &Triple) : PPCTargetInfo(Triple) { @@ -1334,17 +1467,14 @@ public: } // PPC64 Linux-specific ABI options. bool setABI(const std::string &Name) override { - if (Name == "elfv1" || Name == "elfv2") { + if (Name == "elfv1" || Name == "elfv1-qpx" || Name == "elfv2") { ABI = Name; return true; } return false; } }; -} // end anonymous namespace. - -namespace { class DarwinPPC32TargetInfo : public DarwinTargetInfo<PPC32TargetInfo> { public: @@ -1372,9 +1502,7 @@ public: DescriptionString = "E-m:o-i64:64-n32:64"; } }; -} // end anonymous namespace. -namespace { static const unsigned NVPTXAddrSpaceMap[] = { 1, // opencl_global 3, // opencl_local @@ -1396,6 +1524,7 @@ namespace { GK_SM21, GK_SM30, GK_SM35, + GK_SM37, } GPU; public: @@ -1431,6 +1560,9 @@ namespace { case GK_SM35: CUDAArchCode = "350"; break; + case GK_SM37: + CUDAArchCode = "370"; + break; default: llvm_unreachable("Unhandled target CPU"); } @@ -1483,6 +1615,7 @@ namespace { .Case("sm_21", GK_SM21) .Case("sm_30", GK_SM30) .Case("sm_35", GK_SM35) + .Case("sm_37", GK_SM37) .Default(GK_NONE); return GPU != GK_NONE; @@ -1510,24 +1643,23 @@ namespace { public: NVPTX32TargetInfo(const llvm::Triple &Triple) : NVPTXTargetInfo(Triple) { PointerWidth = PointerAlign = 32; - SizeType = PtrDiffType = TargetInfo::UnsignedInt; + SizeType = TargetInfo::UnsignedInt; + PtrDiffType = TargetInfo::SignedInt; IntPtrType = TargetInfo::SignedInt; DescriptionString = "e-p:32:32-i64:64-v16:16-v32:32-n16:32:64"; - } + } }; class NVPTX64TargetInfo : public NVPTXTargetInfo { public: NVPTX64TargetInfo(const llvm::Triple &Triple) : NVPTXTargetInfo(Triple) { PointerWidth = PointerAlign = 64; - SizeType = PtrDiffType = TargetInfo::UnsignedLongLong; - IntPtrType = TargetInfo::SignedLongLong; + SizeType = TargetInfo::UnsignedLong; + PtrDiffType = TargetInfo::SignedLong; + IntPtrType = TargetInfo::SignedLong; DescriptionString = "e-i64:64-v16:16-v32:32-n16:32:64"; - } + } }; -} - -namespace { static const unsigned R600AddrSpaceMap[] = { 1, // opencl_global @@ -1557,6 +1689,7 @@ static const char *DescriptionStringSI = class R600TargetInfo : public TargetInfo { static const Builtin::Info BuiltinInfo[]; + static const char * const GCCRegNames[]; /// \brief The GPU profiles supported by the R600 target.
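Each new PPC feature above is threaded through the same three hooks: getDefaultFeatures() seeds it from the CPU name, handleTargetFeatures() latches it into a member bool, and hasFeature()/getTargetDefines() expose it to __has_feature and the preprocessor. The CPU-to-default mapping is the usual llvm::StringSwitch idiom, which also works standalone:

```cpp
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"

// Same shape as the Features["crypto"] initialization above: map a CPU
// name to a default value without any heap allocation.
static bool defaultsToCrypto(llvm::StringRef CPU) {
  return llvm::StringSwitch<bool>(CPU)
      .Case("ppc64le", true)
      .Case("pwr8", true)
      .Default(false);
}
```

The R600 class below tracks its per-GPU capabilities (hasFP64, hasFMAF, hasLDEXPF) with the same latched-bool pattern.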
enum GPUKind { @@ -1573,10 +1706,27 @@ class R600TargetInfo : public TargetInfo { GK_SEA_ISLANDS } GPU; + bool hasFP64:1; + bool hasFMAF:1; + bool hasLDEXPF:1; + public: R600TargetInfo(const llvm::Triple &Triple) - : TargetInfo(Triple), GPU(GK_R600) { - DescriptionString = DescriptionStringR600; + : TargetInfo(Triple) { + + if (Triple.getArch() == llvm::Triple::amdgcn) { + DescriptionString = DescriptionStringSI; + GPU = GK_SOUTHERN_ISLANDS; + hasFP64 = true; + hasFMAF = true; + hasLDEXPF = true; + } else { + DescriptionString = DescriptionStringR600; + GPU = GK_R600; + hasFP64 = false; + hasFMAF = false; + hasLDEXPF = false; + } AddrSpaceMap = &R600AddrSpaceMap; UseAddrSpaceMapMangling = true; } @@ -1600,10 +1750,7 @@ public: } void getGCCRegNames(const char * const *&Names, - unsigned &numNames) const override { - Names = nullptr; - numNames = 0; - } + unsigned &NumNames) const override; void getGCCRegAliases(const GCCRegAlias *&Aliases, unsigned &NumAliases) const override { @@ -1625,6 +1772,13 @@ public: void getTargetDefines(const LangOptions &Opts, MacroBuilder &Builder) const override { Builder.defineMacro("__R600__"); + if (hasFMAF) + Builder.defineMacro("__HAS_FMAF__"); + if (hasLDEXPF) + Builder.defineMacro("__HAS_LDEXPF__"); + if (hasFP64 && Opts.OpenCL) { + Builder.defineMacro("cl_khr_fp64"); + } } BuiltinVaListKind getBuiltinVaListKind() const override { @@ -1682,16 +1836,25 @@ public: case GK_EVERGREEN: case GK_NORTHERN_ISLANDS: DescriptionString = DescriptionStringR600; + hasFP64 = false; + hasFMAF = false; + hasLDEXPF = false; break; case GK_R600_DOUBLE_OPS: case GK_R700_DOUBLE_OPS: case GK_EVERGREEN_DOUBLE_OPS: case GK_CAYMAN: DescriptionString = DescriptionStringR600DoubleOps; + hasFP64 = true; + hasFMAF = true; + hasLDEXPF = false; break; case GK_SOUTHERN_ISLANDS: case GK_SEA_ISLANDS: DescriptionString = DescriptionStringSI; + hasFP64 = true; + hasFMAF = true; + hasLDEXPF = true; break; } @@ -1704,10 +1867,65 @@ const Builtin::Info R600TargetInfo::BuiltinInfo[] = { { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES }, #include "clang/Basic/BuiltinsR600.def" }; +const char * const R600TargetInfo::GCCRegNames[] = { + "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", + "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", + "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", + "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", + "v32", "v33", "v34", "v35", "v36", "v37", "v38", "v39", + "v40", "v41", "v42", "v43", "v44", "v45", "v46", "v47", + "v48", "v49", "v50", "v51", "v52", "v53", "v54", "v55", + "v56", "v57", "v58", "v59", "v60", "v61", "v62", "v63", + "v64", "v65", "v66", "v67", "v68", "v69", "v70", "v71", + "v72", "v73", "v74", "v75", "v76", "v77", "v78", "v79", + "v80", "v81", "v82", "v83", "v84", "v85", "v86", "v87", + "v88", "v89", "v90", "v91", "v92", "v93", "v94", "v95", + "v96", "v97", "v98", "v99", "v100", "v101", "v102", "v103", + "v104", "v105", "v106", "v107", "v108", "v109", "v110", "v111", + "v112", "v113", "v114", "v115", "v116", "v117", "v118", "v119", + "v120", "v121", "v122", "v123", "v124", "v125", "v126", "v127", + "v128", "v129", "v130", "v131", "v132", "v133", "v134", "v135", + "v136", "v137", "v138", "v139", "v140", "v141", "v142", "v143", + "v144", "v145", "v146", "v147", "v148", "v149", "v150", "v151", + "v152", "v153", "v154", "v155", "v156", "v157", "v158", "v159", + "v160", "v161", "v162", "v163", "v164", "v165", "v166", "v167", + "v168", "v169", "v170", "v171", "v172", "v173", "v174", "v175", + "v176", "v177", "v178", "v179", "v180", "v181", 
"v182", "v183", + "v184", "v185", "v186", "v187", "v188", "v189", "v190", "v191", + "v192", "v193", "v194", "v195", "v196", "v197", "v198", "v199", + "v200", "v201", "v202", "v203", "v204", "v205", "v206", "v207", + "v208", "v209", "v210", "v211", "v212", "v213", "v214", "v215", + "v216", "v217", "v218", "v219", "v220", "v221", "v222", "v223", + "v224", "v225", "v226", "v227", "v228", "v229", "v230", "v231", + "v232", "v233", "v234", "v235", "v236", "v237", "v238", "v239", + "v240", "v241", "v242", "v243", "v244", "v245", "v246", "v247", + "v248", "v249", "v250", "v251", "v252", "v253", "v254", "v255", + "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", + "s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15", + "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23", + "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31", + "s32", "s33", "s34", "s35", "s36", "s37", "s38", "s39", + "s40", "s41", "s42", "s43", "s44", "s45", "s46", "s47", + "s48", "s49", "s50", "s51", "s52", "s53", "s54", "s55", + "s56", "s57", "s58", "s59", "s60", "s61", "s62", "s63", + "s64", "s65", "s66", "s67", "s68", "s69", "s70", "s71", + "s72", "s73", "s74", "s75", "s76", "s77", "s78", "s79", + "s80", "s81", "s82", "s83", "s84", "s85", "s86", "s87", + "s88", "s89", "s90", "s91", "s92", "s93", "s94", "s95", + "s96", "s97", "s98", "s99", "s100", "s101", "s102", "s103", + "s104", "s105", "s106", "s107", "s108", "s109", "s110", "s111", + "s112", "s113", "s114", "s115", "s116", "s117", "s118", "s119", + "s120", "s121", "s122", "s123", "s124", "s125", "s126", "s127" + "exec", "vcc", "scc", "m0", "flat_scr", "exec_lo", "exec_hi", + "vcc_lo", "vcc_hi", "flat_scr_lo", "flat_scr_hi" +}; -} // end anonymous namespace +void R600TargetInfo::getGCCRegNames(const char * const *&Names, + unsigned &NumNames) const { + Names = GCCRegNames; + NumNames = llvm::array_lengthof(GCCRegNames); +} -namespace { // Namespace for x86 abstract base class const Builtin::Info BuiltinInfo[] = { #define BUILTIN(ID, TYPE, ATTRS) { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES }, @@ -2496,11 +2714,6 @@ void X86TargetInfo::setXOPLevel(llvm::StringMap<bool> &Features, XOPEnum Level, void X86TargetInfo::setFeatureEnabledImpl(llvm::StringMap<bool> &Features, StringRef Name, bool Enabled) { - // FIXME: This *really* should not be here. We need some way of translating - // options into llvm subtarget features. 
- if (Name == "sse4") - Name = "sse4.2"; - Features[Name] = Enabled; if (Name == "mmx") { @@ -3236,9 +3449,7 @@ X86TargetInfo::convertConstraint(const char *&Constraint) const { return std::string(1, *Constraint); } } -} // end anonymous namespace -namespace { // X86-32 generic target class X86_32TargetInfo : public X86TargetInfo { public: @@ -3293,9 +3504,7 @@ public: return X86TargetInfo::validateOperandSize(Constraint, Size); } }; -} // end anonymous namespace -namespace { class NetBSDI386TargetInfo : public NetBSDTargetInfo<X86_32TargetInfo> { public: NetBSDI386TargetInfo(const llvm::Triple &Triple) @@ -3311,9 +3520,7 @@ public: return 1; } }; -} // end anonymous namespace -namespace { class OpenBSDI386TargetInfo : public OpenBSDTargetInfo<X86_32TargetInfo> { public: OpenBSDI386TargetInfo(const llvm::Triple &Triple) @@ -3323,9 +3530,7 @@ public: PtrDiffType = SignedLong; } }; -} // end anonymous namespace -namespace { class BitrigI386TargetInfo : public BitrigTargetInfo<X86_32TargetInfo> { public: BitrigI386TargetInfo(const llvm::Triple &Triple) @@ -3335,9 +3540,7 @@ public: PtrDiffType = SignedLong; } }; -} // end anonymous namespace -namespace { class DarwinI386TargetInfo : public DarwinTargetInfo<X86_32TargetInfo> { public: DarwinI386TargetInfo(const llvm::Triple &Triple) @@ -3353,9 +3556,7 @@ public: } }; -} // end anonymous namespace -namespace { // x86-32 Windows target class WindowsX86_32TargetInfo : public WindowsTargetInfo<X86_32TargetInfo> { public: @@ -3365,8 +3566,9 @@ public: DoubleAlign = LongLongAlign = 64; bool IsWinCOFF = getTriple().isOSWindows() && getTriple().isOSBinFormatCOFF(); - DescriptionString = IsWinCOFF ? "e-m:w-p:32:32-i64:64-f80:32-n8:16:32-S32" - : "e-m:e-p:32:32-i64:64-f80:32-n8:16:32-S32"; + DescriptionString = IsWinCOFF + ? "e-m:x-p:32:32-i64:64-f80:32-n8:16:32-a:0:32-S32" + : "e-m:e-p:32:32-i64:64-f80:32-n8:16:32-a:0:32-S32"; } void getTargetDefines(const LangOptions &Opts, MacroBuilder &Builder) const override { @@ -3436,9 +3638,7 @@ public: addMinGWDefines(Opts, Builder); } }; -} // end anonymous namespace -namespace { // x86-32 Cygwin target class CygwinX86_32TargetInfo : public X86_32TargetInfo { public: @@ -3447,7 +3647,7 @@ public: TLSSupported = false; WCharType = UnsignedShort; DoubleAlign = LongLongAlign = 64; - DescriptionString = "e-m:w-p:32:32-i64:64-f80:32-n8:16:32-S32"; + DescriptionString = "e-m:x-p:32:32-i64:64-f80:32-n8:16:32-a:0:32-S32"; } void getTargetDefines(const LangOptions &Opts, MacroBuilder &Builder) const override { @@ -3460,9 +3660,7 @@ public: Builder.defineMacro("_GNU_SOURCE"); } }; -} // end anonymous namespace -namespace { // x86-32 Haiku target class HaikuX86_32TargetInfo : public X86_32TargetInfo { public: @@ -3481,7 +3679,6 @@ public: Builder.defineMacro("__HAIKU__"); } }; -} // end anonymous namespace // RTEMS Target template<typename Target> @@ -3518,7 +3715,6 @@ public: } }; -namespace { // x86-32 RTEMS target class RTEMSX86_32TargetInfo : public X86_32TargetInfo { public: @@ -3535,14 +3731,14 @@ public: Builder.defineMacro("__rtems__"); } }; -} // end anonymous namespace -namespace { // x86-64 generic target class X86_64TargetInfo : public X86TargetInfo { public: X86_64TargetInfo(const llvm::Triple &Triple) : X86TargetInfo(Triple) { const bool IsX32 = getTriple().getEnvironment() == llvm::Triple::GNUX32; + bool IsWinCOFF = + getTriple().isOSWindows() && getTriple().isOSBinFormatCOFF(); LongWidth = LongAlign = PointerWidth = PointerAlign = IsX32 ? 
32 : 64; LongDoubleWidth = 128; LongDoubleAlign = 128; @@ -3557,9 +3753,10 @@ public: RegParmMax = 6; // Pointers are 32-bit in x32. - DescriptionString = (IsX32) - ? "e-m:e-p:32:32-i64:64-f80:128-n8:16:32:64-S128" - : "e-m:e-i64:64-f80:128-n8:16:32:64-S128"; + DescriptionString = IsX32 ? "e-m:e-p:32:32-i64:64-f80:128-n8:16:32:64-S128" + : IsWinCOFF + ? "e-m:w-i64:64-f80:128-n8:16:32:64-S128" + : "e-m:e-i64:64-f80:128-n8:16:32:64-S128"; // Use fpret only for long double. RealTypeUsesObjCFPRet = (1 << TargetInfo::LongDouble); @@ -3595,9 +3792,7 @@ public: // for x32 we need it here explicitly bool hasInt128Type() const override { return true; } }; -} // end anonymous namespace -namespace { // x86-64 Windows target class WindowsX86_64TargetInfo : public WindowsTargetInfo<X86_64TargetInfo> { public: @@ -3613,24 +3808,34 @@ public: IntPtrType = SignedLongLong; this->UserLabelPrefix = ""; } + void getTargetDefines(const LangOptions &Opts, MacroBuilder &Builder) const override { WindowsTargetInfo<X86_64TargetInfo>::getTargetDefines(Opts, Builder); Builder.defineMacro("_WIN64"); } + BuiltinVaListKind getBuiltinVaListKind() const override { return TargetInfo::CharPtrBuiltinVaList; } + CallingConvCheckResult checkCallingConvention(CallingConv CC) const override { - return (CC == CC_C || - CC == CC_X86VectorCall || - CC == CC_IntelOclBicc || - CC == CC_X86_64SysV) ? CCCR_OK : CCCR_Warning; + switch (CC) { + case CC_X86StdCall: + case CC_X86ThisCall: + case CC_X86FastCall: + return CCCR_Ignore; + case CC_C: + case CC_X86VectorCall: + case CC_IntelOclBicc: + case CC_X86_64SysV: + return CCCR_OK; + default: + return CCCR_Warning; + } } }; -} // end anonymous namespace -namespace { // x86-64 Windows Visual Studio target class MicrosoftX86_64TargetInfo : public WindowsX86_64TargetInfo { public: @@ -3647,9 +3852,7 @@ public: Builder.defineMacro("_M_AMD64"); } }; -} // end anonymous namespace -namespace { // x86-64 MinGW target class MinGWX86_64TargetInfo : public WindowsX86_64TargetInfo { public: @@ -3667,9 +3870,7 @@ public: Builder.defineMacro("__SEH__"); } }; -} // end anonymous namespace -namespace { class DarwinX86_64TargetInfo : public DarwinTargetInfo<X86_64TargetInfo> { public: DarwinX86_64TargetInfo(const llvm::Triple &Triple) @@ -3683,9 +3884,7 @@ public: DescriptionString = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"; } }; -} // end anonymous namespace -namespace { class OpenBSDX86_64TargetInfo : public OpenBSDTargetInfo<X86_64TargetInfo> { public: OpenBSDX86_64TargetInfo(const llvm::Triple &Triple) @@ -3694,9 +3893,7 @@ public: Int64Type = SignedLongLong; } }; -} // end anonymous namespace -namespace { class BitrigX86_64TargetInfo : public BitrigTargetInfo<X86_64TargetInfo> { public: BitrigX86_64TargetInfo(const llvm::Triple &Triple) @@ -3705,10 +3902,7 @@ public: Int64Type = SignedLongLong; } }; -} - -namespace { class ARMTargetInfo : public TargetInfo { // Possible FPU choices. enum FPUMode { @@ -3800,8 +3994,9 @@ class ARMTargetInfo : public TargetInfo { DoubleAlign = LongLongAlign = LongDoubleAlign = SuitableAlign = 64; const llvm::Triple &T = getTriple(); - // size_t is unsigned long on MachO-derived environments and NetBSD. - if (T.isOSBinFormatMachO() || T.getOS() == llvm::Triple::NetBSD) + // size_t is unsigned long on MachO-derived environments, NetBSD and Bitrig. 
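The Win64 checkCallingConvention() rewrite above adds a third outcome: 32-bit-only conventions such as __stdcall become CCCR_Ignore, accepted silently as no-ops on x64, instead of producing the generic warning. A sketch of how a caller would act on the three results; only the enumerator names mirror TargetInfo, the rest is illustrative:

```cpp
enum CCCheckResult { CCCR_OK, CCCR_Warning, CCCR_Ignore };

// What a consumer of checkCallingConvention() does with each result.
static const char *describe(CCCheckResult R) {
  switch (R) {
  case CCCR_OK:      return "honor the requested convention";
  case CCCR_Warning: return "diagnose, then fall back to the default CC";
  case CCCR_Ignore:  return "accept the attribute but treat it as a no-op";
  }
  return "";
}
```

The ARM hunk that follows then extends the size_t rule stated in the comment above to Bitrig.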
+ if (T.isOSBinFormatMachO() || T.getOS() == llvm::Triple::NetBSD || + T.getOS() == llvm::Triple::Bitrig) SizeType = UnsignedLong; else SizeType = UnsignedInt; @@ -3831,16 +4026,18 @@ class ARMTargetInfo : public TargetInfo { BigEndian ? "E-m:o-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64" : "e-m:o-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"; } else if (T.isOSWindows()) { - // FIXME: this is invalid for WindowsCE assert(!BigEndian && "Windows on ARM does not support big endian"); DescriptionString = "e" - "-m:e" + "-m:w" "-p:32:32" "-i64:64" "-v128:64:128" "-a:0:32" "-n32" "-S64"; + } else if (T.isOSNaCl()) { + assert(!BigEndian && "NaCl on ARM does not support big endian"); + DescriptionString = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S128"; } else { DescriptionString = BigEndian ? "E-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64" @@ -3984,8 +4181,15 @@ public: return false; } + // FIXME: This should be based on Arch attributes, not CPU names. void getDefaultFeatures(llvm::StringMap<bool> &Features) const override { StringRef ArchName = getTriple().getArchName(); + unsigned ArchKind = + llvm::ARMTargetParser::parseArch( + llvm::ARMTargetParser::getCanonicalArchName(ArchName)); + bool IsV8 = (ArchKind == llvm::ARM::AK_ARMV8A || + ArchKind == llvm::ARM::AK_ARMV8_1A); + if (CPU == "arm1136jf-s" || CPU == "arm1176jzf-s" || CPU == "mpcore") Features["vfp2"] = true; else if (CPU == "cortex-a8" || CPU == "cortex-a9") { @@ -4002,28 +4206,19 @@ public: Features["neon"] = true; Features["hwdiv"] = true; Features["hwdiv-arm"] = true; - } else if (CPU == "cyclone") { - Features["v8fp"] = true; - Features["neon"] = true; - Features["hwdiv"] = true; - Features["hwdiv-arm"] = true; - } else if (CPU == "cortex-a53" || CPU == "cortex-a57") { + } else if (CPU == "cyclone" || CPU == "cortex-a53" || CPU == "cortex-a57" || + CPU == "cortex-a72") { Features["fp-armv8"] = true; Features["neon"] = true; Features["hwdiv"] = true; Features["hwdiv-arm"] = true; Features["crc"] = true; Features["crypto"] = true; - } else if (CPU == "cortex-r5" || - // Enable the hwdiv extension for all v8a AArch32 cores by - // default. - ArchName == "armv8a" || ArchName == "armv8" || - ArchName == "armebv8a" || ArchName == "armebv8" || - ArchName == "thumbv8a" || ArchName == "thumbv8" || - ArchName == "thumbebv8a" || ArchName == "thumbebv8") { + } else if (CPU == "cortex-r5" || CPU == "cortex-r7" || IsV8) { Features["hwdiv"] = true; Features["hwdiv-arm"] = true; - } else if (CPU == "cortex-m3" || CPU == "cortex-m4" || CPU == "cortex-m7") { + } else if (CPU == "cortex-m3" || CPU == "cortex-m4" || CPU == "cortex-m7" || + CPU == "sc300" || CPU == "cortex-r4" || CPU == "cortex-r4f") { Features["hwdiv"] = true; } } @@ -4080,12 +4275,10 @@ public: Features.push_back("-neonfp"); // Remove front-end specific options which the backend handles differently. - const StringRef FrontEndFeatures[] = { "+soft-float", "+soft-float-abi" }; - for (const auto &FEFeature : FrontEndFeatures) { - auto Feature = std::find(Features.begin(), Features.end(), FEFeature); - if (Feature != Features.end()) - Features.erase(Feature); - } + auto Feature = + std::find(Features.begin(), Features.end(), "+soft-float-abi"); + if (Feature != Features.end()) + Features.erase(Feature); return true; } @@ -4101,7 +4294,18 @@ public: .Default(false); } // FIXME: Should we actually have some table instead of these switches? 
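Rather than hand-matching every "armv8"/"armebv8"/"thumbv8" spelling, the getDefaultFeatures() hunk above canonicalizes the triple's arch name and asks llvm::ARMTargetParser for an architecture kind. The same test in isolation, assuming the era's llvm/Support/TargetParser.h interface exactly as the diff uses it:

```cpp
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/TargetParser.h"

// True for any AArch32 v8/v8.1 arch spelling, however it was written.
static bool isV8Arch(llvm::StringRef ArchName) {
  unsigned Kind = llvm::ARMTargetParser::parseArch(
      llvm::ARMTargetParser::getCanonicalArchName(ArchName));
  return Kind == llvm::ARM::AK_ARMV8A || Kind == llvm::ARM::AK_ARMV8_1A;
}
```

The getCPUDefineSuffix()/getCPUProfile() switches that follow are exactly the tables the FIXME above wishes for.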
- static const char *getCPUDefineSuffix(StringRef Name) { + const char *getCPUDefineSuffix(StringRef Name) const { + // FIXME: Use ARMTargetParser + if(Name == "generic") { + auto subarch = getTriple().getSubArch(); + switch (subarch) { + case llvm::Triple::SubArchType::ARMSubArch_v8_1a: + return "8_1A"; + default: + break; + } + } + return llvm::StringSwitch<const char *>(Name) .Cases("arm8", "arm810", "4") .Cases("strongarm", "strongarm110", "strongarm1100", "strongarm1110", @@ -4115,30 +4319,48 @@ public: .Cases("arm10e", "arm1020e", "arm1022e", "5TE") .Cases("xscale", "iwmmxt", "5TE") .Case("arm1136j-s", "6J") - .Cases("arm1176jz-s", "arm1176jzf-s", "6ZK") - .Cases("arm1136jf-s", "mpcorenovfp", "mpcore", "6K") + .Case("arm1136jf-s", "6") + .Cases("mpcorenovfp", "mpcore", "6K") + .Cases("arm1176jz-s", "arm1176jzf-s", "6K") .Cases("arm1156t2-s", "arm1156t2f-s", "6T2") .Cases("cortex-a5", "cortex-a7", "cortex-a8", "7A") .Cases("cortex-a9", "cortex-a12", "cortex-a15", "cortex-a17", "krait", "7A") - .Cases("cortex-r4", "cortex-r5", "7R") + .Cases("cortex-r4", "cortex-r4f", "cortex-r5", "cortex-r7", "7R") .Case("swift", "7S") .Case("cyclone", "8A") - .Case("cortex-m3", "7M") + .Cases("sc300", "cortex-m3", "7M") .Cases("cortex-m4", "cortex-m7", "7EM") - .Case("cortex-m0", "6M") - .Cases("cortex-a53", "cortex-a57", "8A") + .Cases("sc000", "cortex-m0", "cortex-m0plus", "cortex-m1", "6M") + .Cases("cortex-a53", "cortex-a57", "cortex-a72", "8A") .Default(nullptr); } - static const char *getCPUProfile(StringRef Name) { - return llvm::StringSwitch<const char *>(Name) - .Cases("cortex-a5", "cortex-a7", "cortex-a8", "A") - .Cases("cortex-a9", "cortex-a12", "cortex-a15", "cortex-a17", "krait", - "A") - .Cases("cortex-a53", "cortex-a57", "A") - .Cases("cortex-m3", "cortex-m4", "cortex-m0", "cortex-m7", "M") - .Cases("cortex-r4", "cortex-r5", "R") - .Default(""); + const char *getCPUProfile(StringRef Name) const { + if(Name == "generic") { + auto subarch = getTriple().getSubArch(); + switch (subarch) { + case llvm::Triple::SubArchType::ARMSubArch_v8_1a: + return "A"; + default: + break; + } + } + + unsigned CPUArch = llvm::ARMTargetParser::parseCPUArch(Name); + if (CPUArch == llvm::ARM::AK_INVALID) + return ""; + + StringRef ArchName = llvm::ARMTargetParser::getArchName(CPUArch); + switch(llvm::ARMTargetParser::parseArchProfile(ArchName)) { + case llvm::ARM::PK_A: + return "A"; + case llvm::ARM::PK_R: + return "R"; + case llvm::ARM::PK_M: + return "M"; + default: + return ""; + } } bool setCPU(const std::string &Name) override { if (!getCPUDefineSuffix(Name)) @@ -4165,6 +4387,7 @@ public: // We check both CPUArchVer and ArchName because when only triple is // specified, the default CPU is arm1136j-s. 
return ArchName.endswith("v6t2") || ArchName.endswith("v7") || + ArchName.endswith("v8.1a") || ArchName.endswith("v8") || CPUArch == "6T2" || CPUArchVer >= 7; } void getTargetDefines(const LangOptions &Opts, @@ -4499,8 +4722,8 @@ class ARMleTargetInfo : public ARMTargetInfo { public: ARMleTargetInfo(const llvm::Triple &Triple) : ARMTargetInfo(Triple, false) { } - virtual void getTargetDefines(const LangOptions &Opts, - MacroBuilder &Builder) const { + void getTargetDefines(const LangOptions &Opts, + MacroBuilder &Builder) const override { Builder.defineMacro("__ARMEL__"); ARMTargetInfo::getTargetDefines(Opts, Builder); } @@ -4510,16 +4733,14 @@ class ARMbeTargetInfo : public ARMTargetInfo { public: ARMbeTargetInfo(const llvm::Triple &Triple) : ARMTargetInfo(Triple, true) { } - virtual void getTargetDefines(const LangOptions &Opts, - MacroBuilder &Builder) const { + void getTargetDefines(const LangOptions &Opts, + MacroBuilder &Builder) const override { Builder.defineMacro("__ARMEB__"); Builder.defineMacro("__ARM_BIG_ENDIAN"); ARMTargetInfo::getTargetDefines(Opts, Builder); } }; -} // end anonymous namespace. -namespace { class WindowsARMTargetInfo : public WindowsTargetInfo<ARMleTargetInfo> { const llvm::Triple Triple; public: @@ -4585,10 +4806,7 @@ public: WindowsARMTargetInfo::getVisualStudioDefines(Opts, Builder); } }; -} - -namespace { class DarwinARMTargetInfo : public DarwinTargetInfo<ARMleTargetInfo> { protected: @@ -4610,10 +4828,7 @@ public: TheCXXABI.set(TargetCXXABI::iOS); } }; -} // end anonymous namespace. - -namespace { class AArch64TargetInfo : public TargetInfo { virtual void setDescriptionString() = 0; static const TargetInfo::GCCRegAlias GCCRegAliases[]; @@ -4655,13 +4870,20 @@ public: MaxAtomicInlineWidth = 128; MaxAtomicPromoteWidth = 128; - LongDoubleWidth = LongDoubleAlign = 128; + LongDoubleWidth = LongDoubleAlign = SuitableAlign = 128; LongDoubleFormat = &llvm::APFloat::IEEEquad; // {} in inline assembly are neon specifiers, not assembly variant // specifiers. NoAsmVariants = true; + // AAPCS gives rules for bitfields. 7.1.7 says: "The container type + // contributes to the alignment of the containing aggregate in the same way + // a plain (non bit-field) member of that type would, without exception for + // zero-sized or anonymous bit-fields." + UseBitFieldTypeAlignment = true; + UseZeroLengthBitfieldAlignment = true; + // AArch64 targets default to using the ARM C++ ABI. TheCXXABI.set(TargetCXXABI::GenericAArch64); } @@ -4678,14 +4900,14 @@ public: bool setCPU(const std::string &Name) override { bool CPUKnown = llvm::StringSwitch<bool>(Name) .Case("generic", true) - .Cases("cortex-a53", "cortex-a57", true) + .Cases("cortex-a53", "cortex-a57", "cortex-a72", true) .Case("cyclone", true) .Default(false); return CPUKnown; } - virtual void getTargetDefines(const LangOptions &Opts, - MacroBuilder &Builder) const override { + void getTargetDefines(const LangOptions &Opts, + MacroBuilder &Builder) const override { // Target identification. Builder.defineMacro("__aarch64__"); @@ -4742,10 +4964,16 @@ public: if (Crypto) Builder.defineMacro("__ARM_FEATURE_CRYPTO"); + + // All of the __sync_(bool|val)_compare_and_swap_(1|2|4|8) builtins work. 
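The comment above notes that all of the fixed-width __sync compare-and-swap builtins work on AArch64; the __GCC_HAVE_SYNC_COMPARE_AND_SWAP_{1,2,4,8} predefines that follow advertise that guarantee, and portable code keys its lock-free fast paths off them. A typical consumer, as a sketch:

```cpp
static int Counter;

// Use the atomic builtin only where the compiler has promised it is
// genuinely available; otherwise a real library would fall back to a lock.
int bumpCounter() {
#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
  return __sync_add_and_fetch(&Counter, 1);
#else
  return ++Counter; // placeholder fallback, not thread-safe
#endif
}
```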
+ Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1"); + Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2"); + Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4"); + Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8"); } - virtual void getTargetBuiltins(const Builtin::Info *&Records, - unsigned &NumRecords) const override { + void getTargetBuiltins(const Builtin::Info *&Records, + unsigned &NumRecords) const override { Records = BuiltinInfo; NumRecords = clang::AArch64::LastTSBuiltin - Builtin::FirstTSBuiltin; } @@ -4781,14 +5009,13 @@ public: return TargetInfo::AArch64ABIBuiltinVaList; } - virtual void getGCCRegNames(const char *const *&Names, - unsigned &NumNames) const override; - virtual void getGCCRegAliases(const GCCRegAlias *&Aliases, - unsigned &NumAliases) const override; + void getGCCRegNames(const char *const *&Names, + unsigned &NumNames) const override; + void getGCCRegAliases(const GCCRegAlias *&Aliases, + unsigned &NumAliases) const override; - virtual bool - validateAsmConstraint(const char *&Name, - TargetInfo::ConstraintInfo &Info) const override { + bool validateAsmConstraint(const char *&Name, + TargetInfo::ConstraintInfo &Info) const override { switch (*Name) { default: return false; @@ -4963,9 +5190,7 @@ public: AArch64TargetInfo::getTargetDefines(Opts, Builder); } }; -} // end anonymous namespace. -namespace { class DarwinAArch64TargetInfo : public DarwinTargetInfo<AArch64leTargetInfo> { protected: void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple, @@ -4988,7 +5213,7 @@ public: WCharType = SignedInt; UseSignedCharForObjCBool = false; - LongDoubleWidth = LongDoubleAlign = 64; + LongDoubleWidth = LongDoubleAlign = SuitableAlign = 64; LongDoubleFormat = &llvm::APFloat::IEEEdouble; TheCXXABI.set(TargetCXXABI::iOS64); @@ -4998,9 +5223,7 @@ public: return TargetInfo::CharPtrBuiltinVaList; } }; -} // end anonymous namespace -namespace { // Hexagon abstract base class class HexagonTargetInfo : public TargetInfo { static const Builtin::Info BuiltinInfo[]; @@ -5149,24 +5372,25 @@ const Builtin::Info HexagonTargetInfo::BuiltinInfo[] = { ALL_LANGUAGES }, #include "clang/Basic/BuiltinsHexagon.def" }; -} - -namespace { // Shared base class for SPARC v8 (32-bit) and SPARC v9 (64-bit). class SparcTargetInfo : public TargetInfo { static const TargetInfo::GCCRegAlias GCCRegAliases[]; static const char * const GCCRegNames[]; bool SoftFloat; public: - SparcTargetInfo(const llvm::Triple &Triple) : TargetInfo(Triple) {} + SparcTargetInfo(const llvm::Triple &Triple) + : TargetInfo(Triple), SoftFloat(false) {} bool handleTargetFeatures(std::vector<std::string> &Features, DiagnosticsEngine &Diags) override { - SoftFloat = false; - for (unsigned i = 0, e = Features.size(); i != e; ++i) - if (Features[i] == "+soft-float") - SoftFloat = true; + // The backend doesn't actually handle soft float yet, but in case someone + // is using the support for the front end continue to support it. + auto Feature = std::find(Features.begin(), Features.end(), "+soft-float"); + if (Feature != Features.end()) { + SoftFloat = true; + Features.erase(Feature); + } return true; } void getTargetDefines(const LangOptions &Opts, @@ -5285,6 +5509,15 @@ public: } }; +// SPARCV8el is the 32-bit little-endian mode selected by Triple::sparcel. 
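The Sparc hunk above keeps "+soft-float" working as a front-end convenience but erases it before the feature list reaches a backend that does not implement it, the same find-and-erase idiom the ARM hunk applied to "+soft-float-abi". As a standalone helper:

```cpp
#include <algorithm>
#include <string>
#include <vector>

// Consume a front-end-only feature string: report whether it was present
// and remove it so the backend never sees it.
static bool consumeFeature(std::vector<std::string> &Features,
                           const std::string &Name) {
  auto It = std::find(Features.begin(), Features.end(), Name);
  if (It == Features.end())
    return false;
  Features.erase(It);
  return true;
}
// e.g. SoftFloat = consumeFeature(Features, "+soft-float");
```

With that handled in the shared base, the little-endian V8 variant announced in the comment above only needs to flip BigEndian and swap in its data-layout string, as the class below shows.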
+class SparcV8elTargetInfo : public SparcV8TargetInfo { + public: + SparcV8elTargetInfo(const llvm::Triple &Triple) : SparcV8TargetInfo(Triple) { + DescriptionString = "e-m:e-p:32:32-i64:64-f128:64-n32-S64"; + BigEndian = false; + } +}; + // SPARC v9 is the 64-bit mode selected by Triple::sparcv9. class SparcV9TargetInfo : public SparcTargetInfo { public: @@ -5339,9 +5572,6 @@ public: } }; -} // end anonymous namespace. - -namespace { class SolarisSparcV8TargetInfo : public SolarisTargetInfo<SparcV8TargetInfo> { public: SolarisSparcV8TargetInfo(const llvm::Triple &Triple) @@ -5350,14 +5580,19 @@ public: PtrDiffType = SignedInt; } }; -} // end anonymous namespace. -namespace { class SystemZTargetInfo : public TargetInfo { + static const Builtin::Info BuiltinInfo[]; static const char *const GCCRegNames[]; + std::string CPU; + bool HasTransactionalExecution; + bool HasVector; public: - SystemZTargetInfo(const llvm::Triple &Triple) : TargetInfo(Triple) { + SystemZTargetInfo(const llvm::Triple &Triple) + : TargetInfo(Triple), CPU("z10"), HasTransactionalExecution(false), HasVector(false) { + IntMaxType = SignedLong; + Int64Type = SignedLong; TLSSupported = true; IntWidth = IntAlign = 32; LongWidth = LongLongWidth = LongAlign = LongLongAlign = 64; @@ -5365,6 +5600,7 @@ public: LongDoubleWidth = 128; LongDoubleAlign = 64; LongDoubleFormat = &llvm::APFloat::IEEEquad; + DefaultAlignForAttributeAligned = 64; MinGlobalAlign = 16; DescriptionString = "E-m:e-i1:8:16-i8:8:16-i64:64-f128:64-a:8:16-n32:64"; MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64; @@ -5375,12 +5611,13 @@ public: Builder.defineMacro("__s390x__"); Builder.defineMacro("__zarch__"); Builder.defineMacro("__LONG_DOUBLE_128__"); + if (HasTransactionalExecution) + Builder.defineMacro("__HTM__"); } void getTargetBuiltins(const Builtin::Info *&Records, unsigned &NumRecords) const override { - // FIXME: Implement. - Records = nullptr; - NumRecords = 0; + Records = BuiltinInfo; + NumRecords = clang::SystemZ::LastTSBuiltin-Builtin::FirstTSBuiltin; } void getGCCRegNames(const char *const *&Names, @@ -5401,16 +5638,62 @@ public: return TargetInfo::SystemZBuiltinVaList; } bool setCPU(const std::string &Name) override { + CPU = Name; bool CPUKnown = llvm::StringSwitch<bool>(Name) .Case("z10", true) .Case("z196", true) .Case("zEC12", true) + .Case("z13", true) .Default(false); - // No need to store the CPU yet. There aren't any CPU-specific - // macros to define. return CPUKnown; } + void getDefaultFeatures(llvm::StringMap<bool> &Features) const override { + if (CPU == "zEC12") + Features["transactional-execution"] = true; + if (CPU == "z13") { + Features["transactional-execution"] = true; + Features["vector"] = true; + } + } + + bool handleTargetFeatures(std::vector<std::string> &Features, + DiagnosticsEngine &Diags) override { + HasTransactionalExecution = false; + for (unsigned i = 0, e = Features.size(); i != e; ++i) { + if (Features[i] == "+transactional-execution") + HasTransactionalExecution = true; + if (Features[i] == "+vector") + HasVector = true; + } + // If we use the vector ABI, vector types are 64-bit aligned. 
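getTargetBuiltins() above can finally hand out a real table because BuiltinInfo is produced by X-macro expansion of BuiltinsSystemZ.def (its definition appears just below). A self-contained illustration of the .def trick; my_add/my_load and their records are invented, and a real .def file would be #included where they are inlined here:

```cpp
struct BuiltinRecord { const char *Name, *Type, *Attrs; };

// The includer decides what each BUILTIN(...) line expands to.
static const BuiltinRecord Builtins[] = {
#define BUILTIN(ID, TYPE, ATTRS) { #ID, TYPE, ATTRS },
BUILTIN(my_add, "iii", "n")
BUILTIN(my_load, "iC*", "n")
#undef BUILTIN
};
```

Keeping the records in one .def file lets the same list expand into the ID enum, this table, and any other per-builtin structure from a single source of truth.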
+ if (HasVector) { + MaxVectorAlign = 64; + DescriptionString = "E-m:e-i1:8:16-i8:8:16-i64:64-f128:64" + "-v128:64-a:8:16-n32:64"; + } + return true; + } + + bool hasFeature(StringRef Feature) const override { + return llvm::StringSwitch<bool>(Feature) + .Case("systemz", true) + .Case("htm", HasTransactionalExecution) + .Case("vx", HasVector) + .Default(false); + } + + StringRef getABI() const override { + if (HasVector) + return "vector"; + return ""; + } +}; + +const Builtin::Info SystemZTargetInfo::BuiltinInfo[] = { +#define BUILTIN(ID, TYPE, ATTRS) \ + { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES }, +#include "clang/Basic/BuiltinsSystemZ.def" }; const char *const SystemZTargetInfo::GCCRegNames[] = { @@ -5454,9 +5737,7 @@ validateAsmConstraint(const char *&Name, return true; } } -} -namespace { class MSP430TargetInfo : public TargetInfo { static const char * const GCCRegNames[]; public: @@ -5531,9 +5812,6 @@ namespace { Names = GCCRegNames; NumNames = llvm::array_lengthof(GCCRegNames); } -} - -namespace { // LLVM and Clang cannot be used directly to output native binaries for // target, but is used to compile C code to llvm bitcode with correct @@ -5611,9 +5889,7 @@ namespace { void getGCCRegAliases(const GCCRegAlias *&Aliases, unsigned &NumAliases) const override {} }; -} -namespace { class MipsTargetInfoBase : public TargetInfo { virtual void setDescriptionString() = 0; @@ -5652,6 +5928,10 @@ public: return CPU == "mips32r6" || ABI == "n32" || ABI == "n64" || ABI == "64"; } + bool isNan2008() const override { + return IsNan2008; + } + StringRef getABI() const override { return ABI; } bool setCPU(const std::string &Name) override { bool IsMips32 = getTriple().getArch() == llvm::Triple::mips || @@ -5665,23 +5945,19 @@ public: .Case("mips5", true) .Case("mips32", IsMips32) .Case("mips32r2", IsMips32) + .Case("mips32r3", IsMips32) + .Case("mips32r5", IsMips32) .Case("mips32r6", IsMips32) .Case("mips64", true) .Case("mips64r2", true) + .Case("mips64r3", true) + .Case("mips64r5", true) .Case("mips64r6", true) .Case("octeon", true) .Default(false); } const std::string& getCPU() const { return CPU; } void getDefaultFeatures(llvm::StringMap<bool> &Features) const override { - // The backend enables certain ABI's by default according to the - // architecture. - // Disable both possible defaults so that we don't end up with multiple - // ABI's selected and trigger an assertion. - Features["o32"] = false; - Features["n64"] = false; - - Features[ABI] = true; if (CPU == "octeon") Features["mips64r2"] = Features["cnmips"] = true; else @@ -5818,7 +6094,28 @@ public: case 'R': // An address that can be used in a non-macro load or store Info.setAllowsMemory(); return true; + case 'Z': + if (Name[1] == 'C') { // An address usable by ll, and sc. + Info.setAllowsMemory(); + Name++; // Skip over 'Z'. + return true; + } + return false; + } + } + + std::string convertConstraint(const char *&Constraint) const override { + std::string R; + switch (*Constraint) { + case 'Z': // Two-character constraint; add "^" hint for later parsing. + if (Constraint[1] == 'C') { + R = std::string("^") + std::string(Constraint, 2); + Constraint++; + return R; + } + break; } + return TargetInfo::convertConstraint(Constraint); } const char *getClobbers() const override { @@ -5882,12 +6179,6 @@ public: IsNan2008 = false; } - // Remove front-end specific options. 
- std::vector<std::string>::iterator it = - std::find(Features.begin(), Features.end(), "+soft-float"); - if (it != Features.end()) - Features.erase(it); - setDescriptionString(); return true; @@ -5938,6 +6229,10 @@ public: Builder.defineMacro("__mips_isa_rev", "1"); else if (CPUStr == "mips32r2") Builder.defineMacro("__mips_isa_rev", "2"); + else if (CPUStr == "mips32r3") + Builder.defineMacro("__mips_isa_rev", "3"); + else if (CPUStr == "mips32r5") + Builder.defineMacro("__mips_isa_rev", "5"); else if (CPUStr == "mips32r6") Builder.defineMacro("__mips_isa_rev", "6"); @@ -6087,6 +6382,10 @@ public: Builder.defineMacro("__mips_isa_rev", "1"); else if (CPUStr == "mips64r2") Builder.defineMacro("__mips_isa_rev", "2"); + else if (CPUStr == "mips64r3") + Builder.defineMacro("__mips_isa_rev", "3"); + else if (CPUStr == "mips64r5") + Builder.defineMacro("__mips_isa_rev", "5"); else if (CPUStr == "mips64r6") Builder.defineMacro("__mips_isa_rev", "6"); @@ -6185,9 +6484,7 @@ public: Mips64TargetInfoBase::getTargetDefines(Opts, Builder); } }; -} // end anonymous namespace. -namespace { class PNaClTargetInfo : public TargetInfo { public: PNaClTargetInfo(const llvm::Triple &Triple) : TargetInfo(Triple) { @@ -6252,9 +6549,7 @@ void PNaClTargetInfo::getGCCRegAliases(const GCCRegAlias *&Aliases, Aliases = nullptr; NumAliases = 0; } -} // end anonymous namespace. -namespace { class Le64TargetInfo : public TargetInfo { static const Builtin::Info BuiltinInfo[]; @@ -6359,6 +6654,15 @@ namespace { BuiltinVaListKind getBuiltinVaListKind() const override { return TargetInfo::VoidPtrBuiltinVaList; } + + CallingConvCheckResult checkCallingConvention(CallingConv CC) const override { + return (CC == CC_SpirFunction || + CC == CC_SpirKernel) ? CCCR_OK : CCCR_Warning; + } + + CallingConv getDefaultCallingConv(CallingConvMethodType MT) const override { + return CC_SpirFunction; + } }; @@ -6392,9 +6696,7 @@ namespace { DefineStd(Builder, "SPIR64", Opts); } }; -} -namespace { class XCoreTargetInfo : public TargetInfo { static const Builtin::Info BuiltinInfo[]; public: @@ -6460,6 +6762,30 @@ const Builtin::Info XCoreTargetInfo::BuiltinInfo[] = { }; } // end anonymous namespace. +namespace { +// x86_32 Android target +class AndroidX86_32TargetInfo : public LinuxTargetInfo<X86_32TargetInfo> { +public: + AndroidX86_32TargetInfo(const llvm::Triple &Triple) + : LinuxTargetInfo<X86_32TargetInfo>(Triple) { + SuitableAlign = 32; + LongDoubleWidth = 64; + LongDoubleFormat = &llvm::APFloat::IEEEdouble; + } +}; +} // end anonymous namespace + +namespace { +// x86_64 Android target +class AndroidX86_64TargetInfo : public LinuxTargetInfo<X86_64TargetInfo> { +public: + AndroidX86_64TargetInfo(const llvm::Triple &Triple) + : LinuxTargetInfo<X86_64TargetInfo>(Triple) { + LongDoubleFormat = &llvm::APFloat::IEEEquad; + } +}; +} // end anonymous namespace + //===----------------------------------------------------------------------===// // Driver code @@ -6629,10 +6955,10 @@ static TargetInfo *AllocateTarget(const llvm::Triple &Triple) { case llvm::Triple::le32: switch (os) { - case llvm::Triple::NaCl: - return new NaClTargetInfo<PNaClTargetInfo>(Triple); - default: - return nullptr; + case llvm::Triple::NaCl: + return new NaClTargetInfo<PNaClTargetInfo>(Triple); + default: + return nullptr; } case llvm::Triple::le64: @@ -6707,6 +7033,21 @@ static TargetInfo *AllocateTarget(const llvm::Triple &Triple) { return new SparcV8TargetInfo(Triple); } + // The 'sparcel' architecture copies all the above cases except for Solaris. 
+ case llvm::Triple::sparcel: + switch (os) { + case llvm::Triple::Linux: + return new LinuxTargetInfo<SparcV8elTargetInfo>(Triple); + case llvm::Triple::NetBSD: + return new NetBSDTargetInfo<SparcV8elTargetInfo>(Triple); + case llvm::Triple::OpenBSD: + return new OpenBSDTargetInfo<SparcV8elTargetInfo>(Triple); + case llvm::Triple::RTEMS: + return new RTEMSTargetInfo<SparcV8elTargetInfo>(Triple); + default: + return new SparcV8elTargetInfo(Triple); + } + case llvm::Triple::sparcv9: switch (os) { case llvm::Triple::Linux: @@ -6739,8 +7080,14 @@ static TargetInfo *AllocateTarget(const llvm::Triple &Triple) { return new DarwinI386TargetInfo(Triple); switch (os) { - case llvm::Triple::Linux: - return new LinuxTargetInfo<X86_32TargetInfo>(Triple); + case llvm::Triple::Linux: { + switch (Triple.getEnvironment()) { + default: + return new LinuxTargetInfo<X86_32TargetInfo>(Triple); + case llvm::Triple::Android: + return new AndroidX86_32TargetInfo(Triple); + } + } case llvm::Triple::DragonFly: return new DragonFlyBSDTargetInfo<X86_32TargetInfo>(Triple); case llvm::Triple::NetBSD: @@ -6785,8 +7132,16 @@ static TargetInfo *AllocateTarget(const llvm::Triple &Triple) { return new DarwinX86_64TargetInfo(Triple); switch (os) { - case llvm::Triple::Linux: - return new LinuxTargetInfo<X86_64TargetInfo>(Triple); + case llvm::Triple::CloudABI: + return new CloudABITargetInfo<X86_64TargetInfo>(Triple); + case llvm::Triple::Linux: { + switch (Triple.getEnvironment()) { + default: + return new LinuxTargetInfo<X86_64TargetInfo>(Triple); + case llvm::Triple::Android: + return new AndroidX86_64TargetInfo(Triple); + } + } case llvm::Triple::DragonFly: return new DragonFlyBSDTargetInfo<X86_64TargetInfo>(Triple); case llvm::Triple::NetBSD: @@ -6813,22 +7168,24 @@ static TargetInfo *AllocateTarget(const llvm::Triple &Triple) { } case llvm::Triple::NaCl: return new NaClTargetInfo<X86_64TargetInfo>(Triple); + case llvm::Triple::PS4: + return new PS4OSTargetInfo<X86_64TargetInfo>(Triple); default: return new X86_64TargetInfo(Triple); } - case llvm::Triple::spir: { - if (Triple.getOS() != llvm::Triple::UnknownOS || - Triple.getEnvironment() != llvm::Triple::UnknownEnvironment) - return nullptr; - return new SPIR32TargetInfo(Triple); - } - case llvm::Triple::spir64: { - if (Triple.getOS() != llvm::Triple::UnknownOS || - Triple.getEnvironment() != llvm::Triple::UnknownEnvironment) - return nullptr; - return new SPIR64TargetInfo(Triple); - } + case llvm::Triple::spir: { + if (Triple.getOS() != llvm::Triple::UnknownOS || + Triple.getEnvironment() != llvm::Triple::UnknownEnvironment) + return nullptr; + return new SPIR32TargetInfo(Triple); + } + case llvm::Triple::spir64: { + if (Triple.getOS() != llvm::Triple::UnknownOS || + Triple.getEnvironment() != llvm::Triple::UnknownEnvironment) + return nullptr; + return new SPIR64TargetInfo(Triple); + } } } diff --git a/contrib/llvm/tools/clang/lib/Basic/Version.cpp b/contrib/llvm/tools/clang/lib/Basic/Version.cpp index ae9eb1a..6accb04 100644 --- a/contrib/llvm/tools/clang/lib/Basic/Version.cpp +++ b/contrib/llvm/tools/clang/lib/Basic/Version.cpp @@ -36,7 +36,7 @@ std::string getClangRepositoryPath() { // If the SVN_REPOSITORY is empty, try to use the SVN keyword. This helps us // pick up a tag in an SVN export, for example. 
- StringRef SVNRepository("$URL: https://llvm.org/svn/llvm-project/cfe/tags/RELEASE_361/final/lib/Basic/Version.cpp $"); + StringRef SVNRepository("$URL: https://llvm.org/svn/llvm-project/cfe/trunk/lib/Basic/Version.cpp $"); if (URL.empty()) { URL = SVNRepository.slice(SVNRepository.find(':'), SVNRepository.find("/lib/Basic")); diff --git a/contrib/llvm/tools/clang/lib/Basic/VersionTuple.cpp b/contrib/llvm/tools/clang/lib/Basic/VersionTuple.cpp index aa43ae2..9c73fd9 100644 --- a/contrib/llvm/tools/clang/lib/Basic/VersionTuple.cpp +++ b/contrib/llvm/tools/clang/lib/Basic/VersionTuple.cpp @@ -32,6 +32,8 @@ raw_ostream& clang::operator<<(raw_ostream &Out, Out << (V.usesUnderscores() ? '_' : '.') << *Minor; if (Optional<unsigned> Subminor = V.getSubminor()) Out << (V.usesUnderscores() ? '_' : '.') << *Subminor; + if (Optional<unsigned> Build = V.getBuild()) + Out << (V.usesUnderscores() ? '_' : '.') << *Build; return Out; } @@ -55,7 +57,7 @@ static bool parseInt(StringRef &input, unsigned &value) { } bool VersionTuple::tryParse(StringRef input) { - unsigned major = 0, minor = 0, micro = 0; + unsigned major = 0, minor = 0, micro = 0, build = 0; // Parse the major version, [0-9]+ if (parseInt(input, major)) return true; @@ -80,9 +82,19 @@ bool VersionTuple::tryParse(StringRef input) { input = input.substr(1); if (parseInt(input, micro)) return true; + if (input.empty()) { + *this = VersionTuple(major, minor, micro); + return false; + } + + // If we're not done, parse the build version, \.[0-9]+ + if (input[0] != '.') return true; + input = input.substr(1); + if (parseInt(input, build)) return true; + // If we have characters left over, it's an error. if (!input.empty()) return true; - *this = VersionTuple(major, minor, micro); + *this = VersionTuple(major, minor, micro, build); return false; } diff --git a/contrib/llvm/tools/clang/lib/Basic/VirtualFileSystem.cpp b/contrib/llvm/tools/clang/lib/Basic/VirtualFileSystem.cpp index c89195e..8a882e1 100644 --- a/contrib/llvm/tools/clang/lib/Basic/VirtualFileSystem.cpp +++ b/contrib/llvm/tools/clang/lib/Basic/VirtualFileSystem.cpp @@ -92,7 +92,7 @@ class RealFile : public File { } public: - ~RealFile(); + ~RealFile() override; ErrorOr<Status> status() override; ErrorOr<std::unique_ptr<MemoryBuffer>> getBuffer(const Twine &Name, int64_t FileSize = -1, @@ -362,7 +362,7 @@ class DirectoryEntry : public Entry { Status S; public: - virtual ~DirectoryEntry(); + ~DirectoryEntry() override; DirectoryEntry(StringRef Name, std::vector<Entry *> Contents, Status S) : Entry(EK_Directory, Name), Contents(std::move(Contents)), S(std::move(S)) {} @@ -498,7 +498,7 @@ private: ErrorOr<Status> status(const Twine &Path, Entry *E); public: - ~VFSFromYAML(); + ~VFSFromYAML() override; /// \brief Parses \p Buffer, which is expected to be in YAML format and /// returns a virtual file system representing its contents.
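With the build component added in the VersionTuple hunk above, four-part versions now round-trip: tryParse() accepts them and the extended operator<< prints them. A short usage sketch, assuming clang/Basic/VersionTuple.h and LLVM's raw_ostream are available:

```cpp
#include "clang/Basic/VersionTuple.h"
#include "llvm/Support/raw_ostream.h"

void demoVersionTuple() {
  clang::VersionTuple V;
  // tryParse() returns true on *error*, matching the code above.
  if (!V.tryParse("1.2.3.4"))
    llvm::outs() << V.getAsString() << "\n"; // now prints "1.2.3.4" intact
}
```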
@@ -1134,7 +1134,7 @@ VFSFromYamlDirIterImpl::VFSFromYamlDirIterImpl(const Twine &_Path, if (Current != End) { SmallString<128> PathStr(Dir); llvm::sys::path::append(PathStr, (*Current)->getName()); - llvm::ErrorOr<vfs::Status> S = FS.status(PathStr.str()); + llvm::ErrorOr<vfs::Status> S = FS.status(PathStr); if (S) CurrentEntry = *S; else @@ -1147,7 +1147,7 @@ std::error_code VFSFromYamlDirIterImpl::increment() { if (++Current != End) { SmallString<128> PathStr(Dir); llvm::sys::path::append(PathStr, (*Current)->getName()); - llvm::ErrorOr<vfs::Status> S = FS.status(PathStr.str()); + llvm::ErrorOr<vfs::Status> S = FS.status(PathStr); if (!S) return S.getError(); CurrentEntry = *S; diff --git a/contrib/llvm/tools/clang/lib/CodeGen/ABIInfo.h b/contrib/llvm/tools/clang/lib/CodeGen/ABIInfo.h index 7e7f7fa..cc8652e 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/ABIInfo.h +++ b/contrib/llvm/tools/clang/lib/CodeGen/ABIInfo.h @@ -87,6 +87,8 @@ namespace clang { virtual bool isHomogeneousAggregateSmallEnough(const Type *Base, uint64_t Members) const; + virtual bool shouldSignExtUnsignedType(QualType Ty) const; + bool isHomogeneousAggregate(QualType Ty, const Type *&Base, uint64_t &Members) const; diff --git a/contrib/llvm/tools/clang/lib/CodeGen/BackendUtil.cpp b/contrib/llvm/tools/clang/lib/CodeGen/BackendUtil.cpp index 25ecec5..7f0c7ba 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/BackendUtil.cpp +++ b/contrib/llvm/tools/clang/lib/CodeGen/BackendUtil.cpp @@ -15,22 +15,22 @@ #include "clang/Frontend/FrontendDiagnostic.h" #include "clang/Frontend/Utils.h" #include "llvm/ADT/StringSwitch.h" +#include "llvm/Analysis/TargetLibraryInfo.h" +#include "llvm/Analysis/TargetTransformInfo.h" #include "llvm/Bitcode/BitcodeWriterPass.h" #include "llvm/CodeGen/RegAllocRegistry.h" #include "llvm/CodeGen/SchedulerRegistry.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/IRPrintingPasses.h" +#include "llvm/IR/LegacyPassManager.h" #include "llvm/IR/Module.h" #include "llvm/IR/Verifier.h" #include "llvm/MC/SubtargetFeature.h" -#include "llvm/PassManager.h" #include "llvm/Support/CommandLine.h" -#include "llvm/Support/FormattedStream.h" #include "llvm/Support/PrettyStackTrace.h" #include "llvm/Support/TargetRegistry.h" #include "llvm/Support/Timer.h" #include "llvm/Support/raw_ostream.h" -#include "llvm/Target/TargetLibraryInfo.h" #include "llvm/Target/TargetMachine.h" #include "llvm/Target/TargetOptions.h" #include "llvm/Target/TargetSubtargetInfo.h" @@ -55,44 +55,48 @@ class EmitAssemblyHelper { Timer CodeGenerationTime; - mutable PassManager *CodeGenPasses; - mutable PassManager *PerModulePasses; - mutable FunctionPassManager *PerFunctionPasses; + mutable legacy::PassManager *CodeGenPasses; + mutable legacy::PassManager *PerModulePasses; + mutable legacy::FunctionPassManager *PerFunctionPasses; private: - PassManager *getCodeGenPasses() const { + TargetIRAnalysis getTargetIRAnalysis() const { + if (TM) + return TM->getTargetIRAnalysis(); + + return TargetIRAnalysis(); + } + + legacy::PassManager *getCodeGenPasses() const { if (!CodeGenPasses) { - CodeGenPasses = new PassManager(); - CodeGenPasses->add(new DataLayoutPass()); - if (TM) - TM->addAnalysisPasses(*CodeGenPasses); + CodeGenPasses = new legacy::PassManager(); + CodeGenPasses->add( + createTargetTransformInfoWrapperPass(getTargetIRAnalysis())); } return CodeGenPasses; } - PassManager *getPerModulePasses() const { + legacy::PassManager *getPerModulePasses() const { if (!PerModulePasses) { - PerModulePasses = new PassManager(); - 
PerModulePasses->add(new DataLayoutPass()); - if (TM) - TM->addAnalysisPasses(*PerModulePasses); + PerModulePasses = new legacy::PassManager(); + PerModulePasses->add( + createTargetTransformInfoWrapperPass(getTargetIRAnalysis())); } return PerModulePasses; } - FunctionPassManager *getPerFunctionPasses() const { + legacy::FunctionPassManager *getPerFunctionPasses() const { if (!PerFunctionPasses) { - PerFunctionPasses = new FunctionPassManager(TheModule); - PerFunctionPasses->add(new DataLayoutPass()); - if (TM) - TM->addAnalysisPasses(*PerFunctionPasses); + PerFunctionPasses = new legacy::FunctionPassManager(TheModule); + PerFunctionPasses->add( + createTargetTransformInfoWrapperPass(getTargetIRAnalysis())); } return PerFunctionPasses; } void CreatePasses(); - /// CreateTargetMachine - Generates the TargetMachine. + /// Generates the TargetMachine. /// Returns Null if it is unable to create the target machine. /// Some of our clang tests specify triples which are not built /// into clang. This is okay because these tests check the generated @@ -102,10 +106,10 @@ private: /// the requested target. TargetMachine *CreateTargetMachine(bool MustCreateTM); - /// AddEmitPasses - Add passes necessary to emit assembly or LLVM IR. + /// Add passes necessary to emit assembly or LLVM IR. /// /// \return True on success. - bool AddEmitPasses(BackendAction Action, formatted_raw_ostream &OS); + bool AddEmitPasses(BackendAction Action, raw_pwrite_stream &OS); public: EmitAssemblyHelper(DiagnosticsEngine &_Diags, @@ -128,7 +132,7 @@ public: std::unique_ptr<TargetMachine> TM; - void EmitAssembly(BackendAction Action, raw_ostream *OS); + void EmitAssembly(BackendAction Action, raw_pwrite_stream *OS); }; // We need this wrapper to access LangOpts and CGOpts from extension functions @@ -163,7 +167,7 @@ static void addObjCARCOptPass(const PassManagerBuilder &Builder, PassManagerBase } static void addSampleProfileLoaderPass(const PassManagerBuilder &Builder, - PassManagerBase &PM) { + legacy::PassManagerBase &PM) { const PassManagerBuilderWrapper &BuilderWrapper = static_cast<const PassManagerBuilderWrapper &>(Builder); const CodeGenOptions &CGOpts = BuilderWrapper.getCGOpts(); @@ -171,31 +175,38 @@ static void addSampleProfileLoaderPass(const PassManagerBuilder &Builder, } static void addAddDiscriminatorsPass(const PassManagerBuilder &Builder, - PassManagerBase &PM) { + legacy::PassManagerBase &PM) { PM.add(createAddDiscriminatorsPass()); } static void addBoundsCheckingPass(const PassManagerBuilder &Builder, - PassManagerBase &PM) { + legacy::PassManagerBase &PM) { PM.add(createBoundsCheckingPass()); } static void addSanitizerCoveragePass(const PassManagerBuilder &Builder, - PassManagerBase &PM) { + legacy::PassManagerBase &PM) { const PassManagerBuilderWrapper &BuilderWrapper = static_cast<const PassManagerBuilderWrapper&>(Builder); const CodeGenOptions &CGOpts = BuilderWrapper.getCGOpts(); - PM.add(createSanitizerCoverageModulePass(CGOpts.SanitizeCoverage)); + SanitizerCoverageOptions Opts; + Opts.CoverageType = + static_cast<SanitizerCoverageOptions::Type>(CGOpts.SanitizeCoverageType); + Opts.IndirectCalls = CGOpts.SanitizeCoverageIndirectCalls; + Opts.TraceBB = CGOpts.SanitizeCoverageTraceBB; + Opts.TraceCmp = CGOpts.SanitizeCoverageTraceCmp; + Opts.Use8bitCounters = CGOpts.SanitizeCoverage8bitCounters; + PM.add(createSanitizerCoverageModulePass(Opts)); } static void addAddressSanitizerPasses(const PassManagerBuilder &Builder, - PassManagerBase &PM) { + legacy::PassManagerBase &PM) { 
PM.add(createAddressSanitizerFunctionPass()); PM.add(createAddressSanitizerModulePass()); } static void addMemorySanitizerPass(const PassManagerBuilder &Builder, - PassManagerBase &PM) { + legacy::PassManagerBase &PM) { const PassManagerBuilderWrapper &BuilderWrapper = static_cast<const PassManagerBuilderWrapper&>(Builder); const CodeGenOptions &CGOpts = BuilderWrapper.getCGOpts(); @@ -215,28 +226,36 @@ static void addMemorySanitizerPass(const PassManagerBuilder &Builder, } static void addThreadSanitizerPass(const PassManagerBuilder &Builder, - PassManagerBase &PM) { + legacy::PassManagerBase &PM) { PM.add(createThreadSanitizerPass()); } static void addDataFlowSanitizerPass(const PassManagerBuilder &Builder, - PassManagerBase &PM) { + legacy::PassManagerBase &PM) { const PassManagerBuilderWrapper &BuilderWrapper = static_cast<const PassManagerBuilderWrapper&>(Builder); const LangOptions &LangOpts = BuilderWrapper.getLangOpts(); - PM.add(createDataFlowSanitizerPass(LangOpts.SanitizerBlacklistFile)); + PM.add(createDataFlowSanitizerPass(LangOpts.SanitizerBlacklistFiles)); } -static TargetLibraryInfo *createTLI(llvm::Triple &TargetTriple, - const CodeGenOptions &CodeGenOpts) { - TargetLibraryInfo *TLI = new TargetLibraryInfo(TargetTriple); +static TargetLibraryInfoImpl *createTLII(llvm::Triple &TargetTriple, + const CodeGenOptions &CodeGenOpts) { + TargetLibraryInfoImpl *TLII = new TargetLibraryInfoImpl(TargetTriple); if (!CodeGenOpts.SimplifyLibCalls) - TLI->disableAllFunctions(); - return TLI; + TLII->disableAllFunctions(); + + switch (CodeGenOpts.getVecLib()) { + case CodeGenOptions::Accelerate: + TLII->addVectorizableFunctionsFromVecLib(TargetLibraryInfoImpl::Accelerate); + break; + default: + break; + } + return TLII; } static void addSymbolRewriterPass(const CodeGenOptions &Opts, - PassManager *MPM) { + legacy::PassManager *MPM) { llvm::SymbolRewriter::RewriteDescriptorList DL; llvm::SymbolRewriter::RewriteMapParser MapParser; @@ -294,7 +313,9 @@ void EmitAssemblyHelper::CreatePasses() { addBoundsCheckingPass); } - if (CodeGenOpts.SanitizeCoverage) { + if (CodeGenOpts.SanitizeCoverageType || + CodeGenOpts.SanitizeCoverageIndirectCalls || + CodeGenOpts.SanitizeCoverageTraceCmp) { PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast, addSanitizerCoveragePass); PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0, @@ -331,7 +352,7 @@ void EmitAssemblyHelper::CreatePasses() { // Figure out TargetLibraryInfo. Triple TargetTriple(TheModule->getTargetTriple()); - PMBuilder.LibraryInfo = createTLI(TargetTriple, CodeGenOpts); + PMBuilder.LibraryInfo = createTLII(TargetTriple, CodeGenOpts); switch (Inlining) { case CodeGenOptions::NoInlining: break; @@ -351,17 +372,15 @@ void EmitAssemblyHelper::CreatePasses() { } // Set up the per-function pass manager. - FunctionPassManager *FPM = getPerFunctionPasses(); + legacy::FunctionPassManager *FPM = getPerFunctionPasses(); if (CodeGenOpts.VerifyModule) FPM->add(createVerifierPass()); PMBuilder.populateFunctionPassManager(*FPM); // Set up the per-module pass manager. 
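All of the sanitizer and instrumentation hooks in this hunk share one idiom: the extension callback receives only a const PassManagerBuilder&, so it static_casts to the PassManagerBuilderWrapper to recover the CodeGenOptions/LangOptions captured at registration time. A self-contained sketch of that wrapper-downcast idiom (illustrative names, not the LLVM API):

#include <cstdio>
#include <functional>
#include <vector>

struct Options { bool TraceCmp = false; };

// Stand-in for PassManagerBuilder: owns a list of extension callbacks.
struct Builder {
    using Ext = std::function<void(const Builder &)>;
    std::vector<Ext> Extensions;
    virtual ~Builder() = default;
    void addExtension(Ext E) { Extensions.push_back(std::move(E)); }
    void populate() const { for (auto &E : Extensions) E(*this); }
};

// Stand-in for PassManagerBuilderWrapper: carries the options that the
// static callbacks need but cannot capture directly.
struct BuilderWrapper : Builder {
    explicit BuilderWrapper(const Options &O) : Opts(O) {}
    const Options &Opts;
};

// The callback recovers the options via a downcast, exactly as
// addSanitizerCoveragePass does with static_cast in the hunk above.
static void addCoveragePass(const Builder &B) {
    auto &Opts = static_cast<const BuilderWrapper &>(B).Opts;
    std::printf("coverage pass, trace-cmp=%d\n", Opts.TraceCmp);
}

int main() {
    Options O; O.TraceCmp = true;
    BuilderWrapper BW(O);
    BW.addExtension(addCoveragePass);
    BW.populate();
}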
- PassManager *MPM = getPerModulePasses(); + legacy::PassManager *MPM = getPerModulePasses(); if (!CodeGenOpts.RewriteMapFiles.empty()) addSymbolRewriterPass(CodeGenOpts, MPM); - if (CodeGenOpts.VerifyModule) - MPM->add(createDebugInfoVerifierPass()); if (!CodeGenOpts.DisableGCov && (CodeGenOpts.EmitGcovArcs || CodeGenOpts.EmitGcovNotes)) { @@ -375,6 +394,7 @@ void EmitAssemblyHelper::CreatePasses() { Options.NoRedZone = CodeGenOpts.DisableRedZone; Options.FunctionNamesInData = !CodeGenOpts.CoverageNoFunctionNamesInData; + Options.ExitBlockBeforeBody = CodeGenOpts.CoverageExitBlockBeforeBody; MPM->add(createGCOVProfilerPass(Options)); if (CodeGenOpts.getDebugInfo() == CodeGenOptions::NoDebugInfo) MPM->add(createStripSymbolsPass(true)); @@ -383,6 +403,7 @@ void EmitAssemblyHelper::CreatePasses() { if (CodeGenOpts.ProfileInstrGenerate) { InstrProfOptions Options; Options.NoRedZone = CodeGenOpts.DisableRedZone; + Options.InstrProfileOutput = CodeGenOpts.InstrProfileOutput; MPM->add(createInstrProfilingPass(Options)); } @@ -425,14 +446,12 @@ TargetMachine *EmitAssemblyHelper::CreateTargetMachine(bool MustCreateTM) { BackendArgs.push_back("-time-passes"); for (unsigned i = 0, e = CodeGenOpts.BackendOptions.size(); i != e; ++i) BackendArgs.push_back(CodeGenOpts.BackendOptions[i].c_str()); - if (CodeGenOpts.NoGlobalMerge) - BackendArgs.push_back("-enable-global-merge=false"); BackendArgs.push_back(nullptr); llvm::cl::ParseCommandLineOptions(BackendArgs.size() - 1, BackendArgs.data()); std::string FeaturesStr; - if (TargetOpts.Features.size()) { + if (!TargetOpts.Features.empty()) { SubtargetFeatures Features; for (std::vector<std::string>::const_iterator it = TargetOpts.Features.begin(), @@ -472,15 +491,6 @@ TargetMachine *EmitAssemblyHelper::CreateTargetMachine(bool MustCreateTM) { if (CodeGenOpts.CompressDebugSections) Options.CompressDebugSections = true; - // Set frame pointer elimination mode. - if (!CodeGenOpts.DisableFPElim) { - Options.NoFramePointerElim = false; - } else if (CodeGenOpts.OmitLeafFramePointer) { - Options.NoFramePointerElim = false; - } else { - Options.NoFramePointerElim = true; - } - if (CodeGenOpts.UseInitArray) Options.UseInitArray = true; @@ -512,13 +522,13 @@ TargetMachine *EmitAssemblyHelper::CreateTargetMachine(bool MustCreateTM) { Options.NoNaNsFPMath = CodeGenOpts.NoNaNsFPMath; Options.NoZerosInBSS = CodeGenOpts.NoZeroInitializedInBSS; Options.UnsafeFPMath = CodeGenOpts.UnsafeFPMath; - Options.UseSoftFloat = CodeGenOpts.SoftFloat; Options.StackAlignmentOverride = CodeGenOpts.StackAlignment; Options.DisableTailCalls = CodeGenOpts.DisableTailCalls; Options.TrapFuncName = CodeGenOpts.TrapFuncName; Options.PositionIndependentExecutable = LangOpts.PIELevel != 0; Options.FunctionSections = CodeGenOpts.FunctionSections; Options.DataSections = CodeGenOpts.DataSections; + Options.UniqueSectionNames = CodeGenOpts.UniqueSectionNames; Options.MCOptions.MCRelaxAll = CodeGenOpts.RelaxAll; Options.MCOptions.MCSaveTempLabels = CodeGenOpts.SaveTempLabels; @@ -536,17 +546,16 @@ TargetMachine *EmitAssemblyHelper::CreateTargetMachine(bool MustCreateTM) { } bool EmitAssemblyHelper::AddEmitPasses(BackendAction Action, - formatted_raw_ostream &OS) { + raw_pwrite_stream &OS) { // Create the code generator passes. - PassManager *PM = getCodeGenPasses(); + legacy::PassManager *PM = getCodeGenPasses(); // Add LibraryInfo. llvm::Triple TargetTriple(TheModule->getTargetTriple()); - PM->add(createTLI(TargetTriple, CodeGenOpts)); - - // Add Target specific analysis passes. 
- TM->addAnalysisPasses(*PM); + std::unique_ptr<TargetLibraryInfoImpl> TLII( + createTLII(TargetTriple, CodeGenOpts)); + PM->add(new TargetLibraryInfoWrapperPass(*TLII)); // Normal mode, emit a .s or .o file by running the code generator. Note, // this also adds codegenerator level optimization passes. @@ -561,8 +570,7 @@ bool EmitAssemblyHelper::AddEmitPasses(BackendAction Action, // Add ObjC ARC final-cleanup optimizations. This is done as part of the // "codegen" passes so that it isn't run multiple times when there is // inlining happening. - if (LangOpts.ObjCAutoRefCount && - CodeGenOpts.OptimizationLevel > 0) + if (CodeGenOpts.OptimizationLevel > 0) PM->add(createObjCARCContractPass()); if (TM->addPassesToEmitFile(*PM, OS, CGFT, @@ -574,9 +582,9 @@ bool EmitAssemblyHelper::AddEmitPasses(BackendAction Action, return true; } -void EmitAssemblyHelper::EmitAssembly(BackendAction Action, raw_ostream *OS) { +void EmitAssemblyHelper::EmitAssembly(BackendAction Action, + raw_pwrite_stream *OS) { TimeRegion Region(llvm::TimePassesIsEnabled ? &CodeGenerationTime : nullptr); - llvm::formatted_raw_ostream FormattedOS; bool UsesCodeGen = (Action != Backend_EmitNothing && Action != Backend_EmitBC && @@ -592,17 +600,17 @@ void EmitAssemblyHelper::EmitAssembly(BackendAction Action, raw_ostream *OS) { break; case Backend_EmitBC: - getPerModulePasses()->add(createBitcodeWriterPass(*OS)); + getPerModulePasses()->add( + createBitcodeWriterPass(*OS, CodeGenOpts.EmitLLVMUseLists)); break; case Backend_EmitLL: - FormattedOS.setStream(*OS, formatted_raw_ostream::PRESERVE_STREAM); - getPerModulePasses()->add(createPrintModulePass(FormattedOS)); + getPerModulePasses()->add( + createPrintModulePass(*OS, "", CodeGenOpts.EmitLLVMUseLists)); break; default: - FormattedOS.setStream(*OS, formatted_raw_ostream::PRESERVE_STREAM); - if (!AddEmitPasses(Action, FormattedOS)) + if (!AddEmitPasses(Action, *OS)) return; } @@ -639,7 +647,7 @@ void clang::EmitBackendOutput(DiagnosticsEngine &Diags, const clang::TargetOptions &TOpts, const LangOptions &LOpts, StringRef TDesc, Module *M, BackendAction Action, - raw_ostream *OS) { + raw_pwrite_stream *OS) { EmitAssemblyHelper AsmHelper(Diags, CGOpts, TOpts, LOpts, M); AsmHelper.EmitAssembly(Action, OS); @@ -647,9 +655,8 @@ void clang::EmitBackendOutput(DiagnosticsEngine &Diags, // If an optional clang TargetInfo description string was passed in, use it to // verify the LLVM TargetMachine's DataLayout. 
if (AsmHelper.TM && !TDesc.empty()) { - std::string DLDesc = AsmHelper.TM->getSubtargetImpl() - ->getDataLayout() - ->getStringRepresentation(); + std::string DLDesc = + AsmHelper.TM->getDataLayout()->getStringRepresentation(); if (DLDesc != TDesc) { unsigned DiagID = Diags.getCustomDiagID( DiagnosticsEngine::Error, "backend data layout '%0' does not match " diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGAtomic.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGAtomic.cpp index daac174..da82249 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CGAtomic.cpp +++ b/contrib/llvm/tools/clang/lib/CodeGen/CGAtomic.cpp @@ -13,6 +13,7 @@ #include "CodeGenFunction.h" #include "CGCall.h" +#include "CGRecordLayout.h" #include "CodeGenModule.h" #include "clang/AST/ASTContext.h" #include "clang/CodeGen/CGFunctionInfo.h" @@ -36,34 +37,94 @@ namespace { CharUnits LValueAlign; TypeEvaluationKind EvaluationKind; bool UseLibcall; + LValue LVal; + CGBitFieldInfo BFI; public: - AtomicInfo(CodeGenFunction &CGF, LValue &lvalue) : CGF(CGF) { - assert(lvalue.isSimple()); - - AtomicTy = lvalue.getType(); - ValueTy = AtomicTy->castAs<AtomicType>()->getValueType(); - EvaluationKind = CGF.getEvaluationKind(ValueTy); - + AtomicInfo(CodeGenFunction &CGF, LValue &lvalue) + : CGF(CGF), AtomicSizeInBits(0), ValueSizeInBits(0), + EvaluationKind(TEK_Scalar), UseLibcall(true) { + assert(!lvalue.isGlobalReg()); ASTContext &C = CGF.getContext(); - - uint64_t ValueAlignInBits; - uint64_t AtomicAlignInBits; - TypeInfo ValueTI = C.getTypeInfo(ValueTy); - ValueSizeInBits = ValueTI.Width; - ValueAlignInBits = ValueTI.Align; - - TypeInfo AtomicTI = C.getTypeInfo(AtomicTy); - AtomicSizeInBits = AtomicTI.Width; - AtomicAlignInBits = AtomicTI.Align; - - assert(ValueSizeInBits <= AtomicSizeInBits); - assert(ValueAlignInBits <= AtomicAlignInBits); - - AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits); - ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits); - if (lvalue.getAlignment().isZero()) - lvalue.setAlignment(AtomicAlign); - + if (lvalue.isSimple()) { + AtomicTy = lvalue.getType(); + if (auto *ATy = AtomicTy->getAs<AtomicType>()) + ValueTy = ATy->getValueType(); + else + ValueTy = AtomicTy; + EvaluationKind = CGF.getEvaluationKind(ValueTy); + + uint64_t ValueAlignInBits; + uint64_t AtomicAlignInBits; + TypeInfo ValueTI = C.getTypeInfo(ValueTy); + ValueSizeInBits = ValueTI.Width; + ValueAlignInBits = ValueTI.Align; + + TypeInfo AtomicTI = C.getTypeInfo(AtomicTy); + AtomicSizeInBits = AtomicTI.Width; + AtomicAlignInBits = AtomicTI.Align; + + assert(ValueSizeInBits <= AtomicSizeInBits); + assert(ValueAlignInBits <= AtomicAlignInBits); + + AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits); + ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits); + if (lvalue.getAlignment().isZero()) + lvalue.setAlignment(AtomicAlign); + + LVal = lvalue; + } else if (lvalue.isBitField()) { + ValueTy = lvalue.getType(); + ValueSizeInBits = C.getTypeSize(ValueTy); + auto &OrigBFI = lvalue.getBitFieldInfo(); + auto Offset = OrigBFI.Offset % C.toBits(lvalue.getAlignment()); + AtomicSizeInBits = C.toBits( + C.toCharUnitsFromBits(Offset + OrigBFI.Size + C.getCharWidth() - 1) + .RoundUpToAlignment(lvalue.getAlignment())); + auto VoidPtrAddr = CGF.EmitCastToVoidPtr(lvalue.getBitFieldAddr()); + auto OffsetInChars = + (C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.getAlignment()) * + lvalue.getAlignment(); + VoidPtrAddr = CGF.Builder.CreateConstGEP1_64( + VoidPtrAddr, OffsetInChars.getQuantity()); + auto Addr = 
CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( + VoidPtrAddr, + CGF.Builder.getIntNTy(AtomicSizeInBits)->getPointerTo(), + "atomic_bitfield_base"); + BFI = OrigBFI; + BFI.Offset = Offset; + BFI.StorageSize = AtomicSizeInBits; + LVal = LValue::MakeBitfield(Addr, BFI, lvalue.getType(), + lvalue.getAlignment()); + LVal.setTBAAInfo(lvalue.getTBAAInfo()); + AtomicTy = C.getIntTypeForBitwidth(AtomicSizeInBits, OrigBFI.IsSigned); + if (AtomicTy.isNull()) { + llvm::APInt Size( + /*numBits=*/32, + C.toCharUnitsFromBits(AtomicSizeInBits).getQuantity()); + AtomicTy = C.getConstantArrayType(C.CharTy, Size, ArrayType::Normal, + /*IndexTypeQuals=*/0); + } + AtomicAlign = ValueAlign = lvalue.getAlignment(); + } else if (lvalue.isVectorElt()) { + ValueTy = lvalue.getType()->getAs<VectorType>()->getElementType(); + ValueSizeInBits = C.getTypeSize(ValueTy); + AtomicTy = lvalue.getType(); + AtomicSizeInBits = C.getTypeSize(AtomicTy); + AtomicAlign = ValueAlign = lvalue.getAlignment(); + LVal = lvalue; + } else { + assert(lvalue.isExtVectorElt()); + ValueTy = lvalue.getType(); + ValueSizeInBits = C.getTypeSize(ValueTy); + AtomicTy = ValueTy = CGF.getContext().getExtVectorType( + lvalue.getType(), lvalue.getExtVectorAddr() + ->getType() + ->getPointerElementType() + ->getVectorNumElements()); + AtomicSizeInBits = C.getTypeSize(AtomicTy); + AtomicAlign = ValueAlign = lvalue.getAlignment(); + LVal = lvalue; + } UseLibcall = !C.getTargetInfo().hasBuiltinAtomic( AtomicSizeInBits, C.toBits(lvalue.getAlignment())); } @@ -76,6 +137,17 @@ namespace { uint64_t getValueSizeInBits() const { return ValueSizeInBits; } TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; } bool shouldUseLibcall() const { return UseLibcall; } + const LValue &getAtomicLValue() const { return LVal; } + llvm::Value *getAtomicAddress() const { + if (LVal.isSimple()) + return LVal.getAddress(); + else if (LVal.isBitField()) + return LVal.getBitFieldAddr(); + else if (LVal.isVectorElt()) + return LVal.getVectorAddr(); + assert(LVal.isExtVectorElt()); + return LVal.getExtVectorAddr(); + } /// Is the atomic size larger than the underlying value type? /// @@ -87,7 +159,7 @@ namespace { return (ValueSizeInBits != AtomicSizeInBits); } - bool emitMemSetZeroIfNecessary(LValue dest) const; + bool emitMemSetZeroIfNecessary() const; llvm::Value *getAtomicSizeValue() const { CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits); @@ -99,37 +171,141 @@ namespace { llvm::Value *emitCastToAtomicIntPointer(llvm::Value *addr) const; /// Turn an atomic-layout object into an r-value. - RValue convertTempToRValue(llvm::Value *addr, - AggValueSlot resultSlot, - SourceLocation loc) const; + RValue convertTempToRValue(llvm::Value *addr, AggValueSlot resultSlot, + SourceLocation loc, bool AsValue) const; /// \brief Converts a rvalue to integer value. llvm::Value *convertRValueToInt(RValue RVal) const; - RValue convertIntToValue(llvm::Value *IntVal, AggValueSlot ResultSlot, - SourceLocation Loc) const; + RValue ConvertIntToValueOrAtomic(llvm::Value *IntVal, + AggValueSlot ResultSlot, + SourceLocation Loc, bool AsValue) const; /// Copy an atomic r-value into atomic-layout memory. - void emitCopyIntoMemory(RValue rvalue, LValue lvalue) const; + void emitCopyIntoMemory(RValue rvalue) const; /// Project an l-value down to the value field. 
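For bitfield lvalues the constructor above widens the accessed storage so that the atomic instruction covers whole, suitably aligned units: the bit offset is reduced modulo the alignment, and offset plus size is rounded up first to bytes and then to the alignment. The arithmetic in isolation (assuming 8-bit chars; the example values are arbitrary):

#include <cstdint>
#include <cstdio>

// Round (offset-in-unit + size) up to whole bytes, then to AlignBytes,
// and return the widened storage size in bits.
static uint64_t atomicStorageBits(uint64_t OffsetBits, uint64_t SizeBits,
                                  uint64_t AlignBytes) {
    const uint64_t CharWidth = 8;
    uint64_t Offset = OffsetBits % (AlignBytes * CharWidth);
    uint64_t Bytes = (Offset + SizeBits + CharWidth - 1) / CharWidth;
    uint64_t Rounded = (Bytes + AlignBytes - 1) / AlignBytes * AlignBytes;
    return Rounded * CharWidth;
}

int main() {
    // A 3-bit field at bit offset 37 in a 4-byte-aligned object:
    // offset-in-unit is 5 bits, 5+3 bits fit one byte, rounded to 4 bytes.
    std::printf("%llu bits\n",
                (unsigned long long)atomicStorageBits(37, 3, 4)); // 32
}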
- LValue projectValue(LValue lvalue) const { - llvm::Value *addr = lvalue.getAddress(); + LValue projectValue() const { + assert(LVal.isSimple()); + llvm::Value *addr = getAtomicAddress(); if (hasPadding()) - addr = CGF.Builder.CreateStructGEP(addr, 0); + addr = CGF.Builder.CreateStructGEP(nullptr, addr, 0); - return LValue::MakeAddr(addr, getValueType(), lvalue.getAlignment(), - CGF.getContext(), lvalue.getTBAAInfo()); + return LValue::MakeAddr(addr, getValueType(), LVal.getAlignment(), + CGF.getContext(), LVal.getTBAAInfo()); } + /// \brief Emits atomic load. + /// \returns Loaded value. + RValue EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc, + bool AsValue, llvm::AtomicOrdering AO, + bool IsVolatile); + + /// \brief Emits atomic compare-and-exchange sequence. + /// \param Expected Expected value. + /// \param Desired Desired value. + /// \param Success Atomic ordering for success operation. + /// \param Failure Atomic ordering for failed operation. + /// \param IsWeak true if atomic operation is weak, false otherwise. + /// \returns Pair of values: previous value from storage (value type) and + /// boolean flag (i1 type) with true if success and false otherwise. + std::pair<RValue, llvm::Value *> EmitAtomicCompareExchange( + RValue Expected, RValue Desired, + llvm::AtomicOrdering Success = llvm::SequentiallyConsistent, + llvm::AtomicOrdering Failure = llvm::SequentiallyConsistent, + bool IsWeak = false); + + /// \brief Emits atomic update. + /// \param AO Atomic ordering. + /// \param UpdateOp Update operation for the current lvalue. + void EmitAtomicUpdate(llvm::AtomicOrdering AO, + const llvm::function_ref<RValue(RValue)> &UpdateOp, + bool IsVolatile); + /// \brief Emits atomic update. + /// \param AO Atomic ordering. + void EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal, + bool IsVolatile); + /// Materialize an atomic r-value in atomic-layout memory. llvm::Value *materializeRValue(RValue rvalue) const; + /// \brief Translates LLVM atomic ordering to GNU atomic ordering for + /// libcalls. + static AtomicExpr::AtomicOrderingKind + translateAtomicOrdering(const llvm::AtomicOrdering AO); + private: bool requiresMemSetZero(llvm::Type *type) const; + + /// \brief Creates temp alloca for intermediate operations on atomic value. + llvm::Value *CreateTempAlloca() const; + + /// \brief Emits atomic load as a libcall. + void EmitAtomicLoadLibcall(llvm::Value *AddForLoaded, + llvm::AtomicOrdering AO, bool IsVolatile); + /// \brief Emits atomic load as LLVM instruction. + llvm::Value *EmitAtomicLoadOp(llvm::AtomicOrdering AO, bool IsVolatile); + /// \brief Emits atomic compare-and-exchange op as a libcall. + llvm::Value *EmitAtomicCompareExchangeLibcall( + llvm::Value *ExpectedAddr, llvm::Value *DesiredAddr, + llvm::AtomicOrdering Success = llvm::SequentiallyConsistent, + llvm::AtomicOrdering Failure = llvm::SequentiallyConsistent); + /// \brief Emits atomic compare-and-exchange op as LLVM instruction. + std::pair<llvm::Value *, llvm::Value *> EmitAtomicCompareExchangeOp( + llvm::Value *ExpectedVal, llvm::Value *DesiredVal, + llvm::AtomicOrdering Success = llvm::SequentiallyConsistent, + llvm::AtomicOrdering Failure = llvm::SequentiallyConsistent, + bool IsWeak = false); + /// \brief Emit atomic update as libcalls. + void + EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO, + const llvm::function_ref<RValue(RValue)> &UpdateOp, + bool IsVolatile); + /// \brief Emit atomic update as LLVM instructions. 
+ void EmitAtomicUpdateOp(llvm::AtomicOrdering AO, + const llvm::function_ref<RValue(RValue)> &UpdateOp, + bool IsVolatile); + /// \brief Emit atomic update as libcalls. + void EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO, RValue UpdateRVal, + bool IsVolatile); + /// \brief Emit atomic update as LLVM instructions. + void EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRal, + bool IsVolatile); }; } +AtomicExpr::AtomicOrderingKind +AtomicInfo::translateAtomicOrdering(const llvm::AtomicOrdering AO) { + switch (AO) { + case llvm::Unordered: + case llvm::NotAtomic: + case llvm::Monotonic: + return AtomicExpr::AO_ABI_memory_order_relaxed; + case llvm::Acquire: + return AtomicExpr::AO_ABI_memory_order_acquire; + case llvm::Release: + return AtomicExpr::AO_ABI_memory_order_release; + case llvm::AcquireRelease: + return AtomicExpr::AO_ABI_memory_order_acq_rel; + case llvm::SequentiallyConsistent: + return AtomicExpr::AO_ABI_memory_order_seq_cst; + } + llvm_unreachable("Unhandled AtomicOrdering"); +} + +llvm::Value *AtomicInfo::CreateTempAlloca() const { + auto *TempAlloca = CGF.CreateMemTemp( + (LVal.isBitField() && ValueSizeInBits > AtomicSizeInBits) ? ValueTy + : AtomicTy, + "atomic-temp"); + TempAlloca->setAlignment(getAtomicAlignment().getQuantity()); + // Cast to pointer to value type for bitfields. + if (LVal.isBitField()) + return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( + TempAlloca, getAtomicAddress()->getType()); + return TempAlloca; +} + static RValue emitAtomicLibcall(CodeGenFunction &CGF, StringRef fnName, QualType resultType, @@ -172,14 +348,16 @@ bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const { llvm_unreachable("bad evaluation kind"); } -bool AtomicInfo::emitMemSetZeroIfNecessary(LValue dest) const { - llvm::Value *addr = dest.getAddress(); +bool AtomicInfo::emitMemSetZeroIfNecessary() const { + assert(LVal.isSimple()); + llvm::Value *addr = LVal.getAddress(); if (!requiresMemSetZero(addr->getType()->getPointerElementType())) return false; - CGF.Builder.CreateMemSet(addr, llvm::ConstantInt::get(CGF.Int8Ty, 0), - AtomicSizeInBits / 8, - dest.getAlignment().getQuantity()); + CGF.Builder.CreateMemSet( + addr, llvm::ConstantInt::get(CGF.Int8Ty, 0), + CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits).getQuantity(), + LVal.getAlignment().getQuantity()); return true; } @@ -901,29 +1079,53 @@ llvm::Value *AtomicInfo::emitCastToAtomicIntPointer(llvm::Value *addr) const { RValue AtomicInfo::convertTempToRValue(llvm::Value *addr, AggValueSlot resultSlot, - SourceLocation loc) const { - if (EvaluationKind == TEK_Aggregate) - return resultSlot.asRValue(); - - // Drill into the padding structure if we have one. - if (hasPadding()) - addr = CGF.Builder.CreateStructGEP(addr, 0); - - // Otherwise, just convert the temporary to an r-value using the - // normal conversion routine. - return CGF.convertTempToRValue(addr, getValueType(), loc); + SourceLocation loc, bool AsValue) const { + if (LVal.isSimple()) { + if (EvaluationKind == TEK_Aggregate) + return resultSlot.asRValue(); + + // Drill into the padding structure if we have one. + if (hasPadding()) + addr = CGF.Builder.CreateStructGEP(nullptr, addr, 0); + + // Otherwise, just convert the temporary to an r-value using the + // normal conversion routine. 
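translateAtomicOrdering above collapses LLVM's orderings onto the five memory_order constants of the GNU __atomic_* library ABI, folding Unordered, NotAtomic, and Monotonic into relaxed. The same table as a standalone sketch (plain enums standing in for the LLVM and ABI types; the ABI values 0..5 match __ATOMIC_RELAXED through __ATOMIC_SEQ_CST):

#include <cstdio>

enum class LLVMOrder { NotAtomic, Unordered, Monotonic, Acquire,
                       Release, AcquireRelease, SequentiallyConsistent };
enum AbiOrder { Relaxed = 0, Consume = 1, Acquire = 2, Release = 3,
                AcqRel = 4, SeqCst = 5 };

static AbiOrder translate(LLVMOrder AO) {
    switch (AO) {
    case LLVMOrder::NotAtomic:
    case LLVMOrder::Unordered:
    case LLVMOrder::Monotonic:              return Relaxed;
    case LLVMOrder::Acquire:                return Acquire;
    case LLVMOrder::Release:                return Release;
    case LLVMOrder::AcquireRelease:         return AcqRel;
    case LLVMOrder::SequentiallyConsistent: return SeqCst;
    }
    return SeqCst; // unreachable; all cases handled above
}

int main() { std::printf("%d\n", translate(LLVMOrder::Monotonic)); } // 0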
+ return CGF.convertTempToRValue(addr, getValueType(), loc); + } + if (!AsValue) + // Get RValue from temp memory as atomic for non-simple lvalues + return RValue::get( + CGF.Builder.CreateAlignedLoad(addr, AtomicAlign.getQuantity())); + if (LVal.isBitField()) + return CGF.EmitLoadOfBitfieldLValue(LValue::MakeBitfield( + addr, LVal.getBitFieldInfo(), LVal.getType(), LVal.getAlignment())); + if (LVal.isVectorElt()) + return CGF.EmitLoadOfLValue(LValue::MakeVectorElt(addr, LVal.getVectorIdx(), + LVal.getType(), + LVal.getAlignment()), + loc); + assert(LVal.isExtVectorElt()); + return CGF.EmitLoadOfExtVectorElementLValue(LValue::MakeExtVectorElt( + addr, LVal.getExtVectorElts(), LVal.getType(), LVal.getAlignment())); } -RValue AtomicInfo::convertIntToValue(llvm::Value *IntVal, - AggValueSlot ResultSlot, - SourceLocation Loc) const { +RValue AtomicInfo::ConvertIntToValueOrAtomic(llvm::Value *IntVal, + AggValueSlot ResultSlot, + SourceLocation Loc, + bool AsValue) const { // Try not to in some easy cases. assert(IntVal->getType()->isIntegerTy() && "Expected integer value"); - if (getEvaluationKind() == TEK_Scalar && !hasPadding()) { - auto *ValTy = CGF.ConvertTypeForMem(ValueTy); + if (getEvaluationKind() == TEK_Scalar && + (((!LVal.isBitField() || + LVal.getBitFieldInfo().Size == ValueSizeInBits) && + !hasPadding()) || + !AsValue)) { + auto *ValTy = AsValue + ? CGF.ConvertTypeForMem(ValueTy) + : getAtomicAddress()->getType()->getPointerElementType(); if (ValTy->isIntegerTy()) { assert(IntVal->getType() == ValTy && "Different integer types."); - return RValue::get(IntVal); + return RValue::get(CGF.EmitFromMemory(IntVal, ValueTy)); } else if (ValTy->isPointerTy()) return RValue::get(CGF.Builder.CreateIntToPtr(IntVal, ValTy)); else if (llvm::CastInst::isBitCastable(IntVal->getType(), ValTy)) @@ -935,13 +1137,13 @@ RValue AtomicInfo::convertIntToValue(llvm::Value *IntVal, llvm::Value *Temp; bool TempIsVolatile = false; CharUnits TempAlignment; - if (getEvaluationKind() == TEK_Aggregate) { + if (AsValue && getEvaluationKind() == TEK_Aggregate) { assert(!ResultSlot.isIgnored()); Temp = ResultSlot.getAddr(); TempAlignment = getValueAlignment(); TempIsVolatile = ResultSlot.isVolatile(); } else { - Temp = CGF.CreateMemTemp(getAtomicType(), "atomic-temp"); + Temp = CreateTempAlloca(); TempAlignment = getAtomicAlignment(); } @@ -950,93 +1152,146 @@ RValue AtomicInfo::convertIntToValue(llvm::Value *IntVal, CGF.Builder.CreateAlignedStore(IntVal, CastTemp, TempAlignment.getQuantity()) ->setVolatile(TempIsVolatile); - return convertTempToRValue(Temp, ResultSlot, Loc); + return convertTempToRValue(Temp, ResultSlot, Loc, AsValue); } -/// Emit a load from an l-value of atomic type. Note that the r-value -/// we produce is an r-value of the atomic *value* type. 
-RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc, - AggValueSlot resultSlot) { - AtomicInfo atomics(*this, src); +void AtomicInfo::EmitAtomicLoadLibcall(llvm::Value *AddForLoaded, + llvm::AtomicOrdering AO, bool) { + // void __atomic_load(size_t size, void *mem, void *return, int order); + CallArgList Args; + Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType()); + Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicAddress())), + CGF.getContext().VoidPtrTy); + Args.add(RValue::get(CGF.EmitCastToVoidPtr(AddForLoaded)), + CGF.getContext().VoidPtrTy); + Args.add(RValue::get( + llvm::ConstantInt::get(CGF.IntTy, translateAtomicOrdering(AO))), + CGF.getContext().IntTy); + emitAtomicLibcall(CGF, "__atomic_load", CGF.getContext().VoidTy, Args); +} - // Check whether we should use a library call. - if (atomics.shouldUseLibcall()) { - llvm::Value *tempAddr; - if (!resultSlot.isIgnored()) { - assert(atomics.getEvaluationKind() == TEK_Aggregate); - tempAddr = resultSlot.getAddr(); - } else { - tempAddr = CreateMemTemp(atomics.getAtomicType(), "atomic-load-temp"); - } +llvm::Value *AtomicInfo::EmitAtomicLoadOp(llvm::AtomicOrdering AO, + bool IsVolatile) { + // Okay, we're doing this natively. + llvm::Value *Addr = emitCastToAtomicIntPointer(getAtomicAddress()); + llvm::LoadInst *Load = CGF.Builder.CreateLoad(Addr, "atomic-load"); + Load->setAtomic(AO); - // void __atomic_load(size_t size, void *mem, void *return, int order); - CallArgList args; - args.add(RValue::get(atomics.getAtomicSizeValue()), - getContext().getSizeType()); - args.add(RValue::get(EmitCastToVoidPtr(src.getAddress())), - getContext().VoidPtrTy); - args.add(RValue::get(EmitCastToVoidPtr(tempAddr)), - getContext().VoidPtrTy); - args.add(RValue::get(llvm::ConstantInt::get( - IntTy, AtomicExpr::AO_ABI_memory_order_seq_cst)), - getContext().IntTy); - emitAtomicLibcall(*this, "__atomic_load", getContext().VoidTy, args); + // Other decoration. + Load->setAlignment(getAtomicAlignment().getQuantity()); + if (IsVolatile) + Load->setVolatile(true); + if (LVal.getTBAAInfo()) + CGF.CGM.DecorateInstruction(Load, LVal.getTBAAInfo()); + return Load; +} + +/// An LValue is a candidate for having its loads and stores be made atomic if +/// we are operating under /volatile:ms *and* the LValue itself is volatile and +/// such an operation can be performed without a libcall. +bool CodeGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) { + AtomicInfo AI(*this, LV); + bool IsVolatile = LV.isVolatile() || hasVolatileMember(LV.getType()); + // An atomic is inline if we don't need to use a libcall. + bool AtomicIsInline = !AI.shouldUseLibcall(); + return CGM.getCodeGenOpts().MSVolatile && IsVolatile && AtomicIsInline; +} - // Produce the r-value. - return atomics.convertTempToRValue(tempAddr, resultSlot, loc); +/// A type is a candidate for having its loads and stores be made atomic if +/// we are operating under /volatile:ms *and* we know the access is volatile and +/// such an operation can be performed without a libcall. +bool CodeGenFunction::typeIsSuitableForInlineAtomic(QualType Ty, + bool IsVolatile) const { + // An atomic is inline if we don't need to use a libcall (e.g. it is builtin).
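The libcall emitted here is the generic, size-parameterized __atomic_load(size, src, ret, order) from the atomic support library. Compilers expose the same operation as a generic builtin; a small illustration (the struct is arbitrary, and a non-lock-free size may need -latomic at link time):

#include <cstdio>

struct Pair { long a, b; };   // 16 bytes: may or may not be inline-atomic

int main() {
    Pair shared = {1, 2}, out;
    // Generic builtin form: __atomic_load(ptr, ret, order). When no
    // inline sequence exists for the size, this lowers to the library
    // call __atomic_load(sizeof(Pair), &shared, &out, __ATOMIC_ACQUIRE).
    __atomic_load(&shared, &out, __ATOMIC_ACQUIRE);
    std::printf("%ld %ld\n", out.a, out.b);
}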
+ bool AtomicIsInline = getContext().getTargetInfo().hasBuiltinAtomic( + getContext().getTypeSize(Ty), getContext().getTypeAlign(Ty)); + return CGM.getCodeGenOpts().MSVolatile && IsVolatile && AtomicIsInline; +} + +RValue CodeGenFunction::EmitAtomicLoad(LValue LV, SourceLocation SL, + AggValueSlot Slot) { + llvm::AtomicOrdering AO; + bool IsVolatile = LV.isVolatileQualified(); + if (LV.getType()->isAtomicType()) { + AO = llvm::SequentiallyConsistent; + } else { + AO = llvm::Acquire; + IsVolatile = true; } + return EmitAtomicLoad(LV, SL, AO, IsVolatile, Slot); +} - // Okay, we're doing this natively. - llvm::Value *addr = atomics.emitCastToAtomicIntPointer(src.getAddress()); - llvm::LoadInst *load = Builder.CreateLoad(addr, "atomic-load"); - load->setAtomic(llvm::SequentiallyConsistent); +RValue AtomicInfo::EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc, + bool AsValue, llvm::AtomicOrdering AO, + bool IsVolatile) { + // Check whether we should use a library call. + if (shouldUseLibcall()) { + llvm::Value *TempAddr; + if (LVal.isSimple() && !ResultSlot.isIgnored()) { + assert(getEvaluationKind() == TEK_Aggregate); + TempAddr = ResultSlot.getAddr(); + } else + TempAddr = CreateTempAlloca(); + + EmitAtomicLoadLibcall(TempAddr, AO, IsVolatile); + + // Okay, turn that back into the original value or whole atomic (for + // non-simple lvalues) type. + return convertTempToRValue(TempAddr, ResultSlot, Loc, AsValue); + } - // Other decoration. - load->setAlignment(src.getAlignment().getQuantity()); - if (src.isVolatileQualified()) - load->setVolatile(true); - if (src.getTBAAInfo()) - CGM.DecorateInstruction(load, src.getTBAAInfo()); + // Okay, we're doing this natively. + auto *Load = EmitAtomicLoadOp(AO, IsVolatile); // If we're ignoring an aggregate return, don't do anything. - if (atomics.getEvaluationKind() == TEK_Aggregate && resultSlot.isIgnored()) + if (getEvaluationKind() == TEK_Aggregate && ResultSlot.isIgnored()) return RValue::getAggregate(nullptr, false); - // Okay, turn that back into the original value type. - return atomics.convertIntToValue(load, resultSlot, loc); + // Okay, turn that back into the original value or atomic (for non-simple + // lvalues) type. + return ConvertIntToValueOrAtomic(Load, ResultSlot, Loc, AsValue); } - +/// Emit a load from an l-value of atomic type. Note that the r-value +/// we produce is an r-value of the atomic *value* type. +RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc, + llvm::AtomicOrdering AO, bool IsVolatile, + AggValueSlot resultSlot) { + AtomicInfo Atomics(*this, src); + return Atomics.EmitAtomicLoad(resultSlot, loc, /*AsValue=*/true, AO, + IsVolatile); +} /// Copy an r-value into memory as part of storing to an atomic type. /// This needs to create a bit-pattern suitable for atomic operations. -void AtomicInfo::emitCopyIntoMemory(RValue rvalue, LValue dest) const { +void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const { + assert(LVal.isSimple()); // If we have an r-value, the rvalue should be of the atomic type, // which means that the caller is responsible for having zeroed // any padding. Just do an aggregate copy of that type. if (rvalue.isAggregate()) { - CGF.EmitAggregateCopy(dest.getAddress(), + CGF.EmitAggregateCopy(getAtomicAddress(), rvalue.getAggregateAddr(), getAtomicType(), (rvalue.isVolatileQualified() - || dest.isVolatileQualified()), - dest.getAlignment()); + || LVal.isVolatileQualified()), + LVal.getAlignment()); return; } // Okay, otherwise we're copying stuff. 
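The EmitAtomicLoad wrapper above picks the ordering from the lvalue: seq_cst for a genuine _Atomic type, acquire (with volatility forced on) for a plain volatile access under /volatile:ms. In std::atomic terms, the two load flavors are:

#include <atomic>

std::atomic<int> flag{0};

int loadAtomicType() {
    return flag.load();                          // seq_cst, like _Atomic
}

int loadMsVolatileStyle() {
    return flag.load(std::memory_order_acquire); // acquire, /volatile:ms
}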
// Zero out the buffer if necessary. - emitMemSetZeroIfNecessary(dest); + emitMemSetZeroIfNecessary(); // Drill past the padding if present. - dest = projectValue(dest); + LValue TempLVal = projectValue(); // Okay, store the rvalue in. if (rvalue.isScalar()) { - CGF.EmitStoreOfScalar(rvalue.getScalarVal(), dest, /*init*/ true); + CGF.EmitStoreOfScalar(rvalue.getScalarVal(), TempLVal, /*init*/ true); } else { - CGF.EmitStoreOfComplex(rvalue.getComplexVal(), dest, /*init*/ true); + CGF.EmitStoreOfComplex(rvalue.getComplexVal(), TempLVal, /*init*/ true); } } @@ -1050,22 +1305,24 @@ llvm::Value *AtomicInfo::materializeRValue(RValue rvalue) const { return rvalue.getAggregateAddr(); // Otherwise, make a temporary and materialize into it. - llvm::Value *temp = CGF.CreateMemTemp(getAtomicType(), "atomic-store-temp"); - LValue tempLV = CGF.MakeAddrLValue(temp, getAtomicType(), getAtomicAlignment()); - emitCopyIntoMemory(rvalue, tempLV); - return temp; + LValue TempLV = CGF.MakeAddrLValue(CreateTempAlloca(), getAtomicType(), + getAtomicAlignment()); + AtomicInfo Atomics(CGF, TempLV); + Atomics.emitCopyIntoMemory(rvalue); + return TempLV.getAddress(); } llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal) const { // If we've got a scalar value of the right size, try to avoid going // through memory. - if (RVal.isScalar() && !hasPadding()) { + if (RVal.isScalar() && (!hasPadding() || !LVal.isSimple())) { llvm::Value *Value = RVal.getScalarVal(); if (isa<llvm::IntegerType>(Value->getType())) - return Value; + return CGF.EmitToMemory(Value, ValueTy); else { - llvm::IntegerType *InputIntTy = - llvm::IntegerType::get(CGF.getLLVMContext(), getValueSizeInBits()); + llvm::IntegerType *InputIntTy = llvm::IntegerType::get( + CGF.getLLVMContext(), + LVal.isSimple() ? getValueSizeInBits() : getAtomicSizeInBits()); if (isa<llvm::PointerType>(Value->getType())) return CGF.Builder.CreatePtrToInt(Value, InputIntTy); else if (llvm::BitCastInst::isBitCastable(Value->getType(), InputIntTy)) @@ -1082,12 +1339,324 @@ llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal) const { getAtomicAlignment().getQuantity()); } +std::pair<llvm::Value *, llvm::Value *> AtomicInfo::EmitAtomicCompareExchangeOp( + llvm::Value *ExpectedVal, llvm::Value *DesiredVal, + llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak) { + // Do the atomic store. + auto *Addr = emitCastToAtomicIntPointer(getAtomicAddress()); + auto *Inst = CGF.Builder.CreateAtomicCmpXchg(Addr, ExpectedVal, DesiredVal, + Success, Failure); + // Other decoration. + Inst->setVolatile(LVal.isVolatileQualified()); + Inst->setWeak(IsWeak); + + // Okay, turn that back into the original value type. 
+ auto *PreviousVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/0); + auto *SuccessFailureVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/1); + return std::make_pair(PreviousVal, SuccessFailureVal); +} + +llvm::Value * +AtomicInfo::EmitAtomicCompareExchangeLibcall(llvm::Value *ExpectedAddr, + llvm::Value *DesiredAddr, + llvm::AtomicOrdering Success, + llvm::AtomicOrdering Failure) { + // bool __atomic_compare_exchange(size_t size, void *obj, void *expected, + // void *desired, int success, int failure); + CallArgList Args; + Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType()); + Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicAddress())), + CGF.getContext().VoidPtrTy); + Args.add(RValue::get(CGF.EmitCastToVoidPtr(ExpectedAddr)), + CGF.getContext().VoidPtrTy); + Args.add(RValue::get(CGF.EmitCastToVoidPtr(DesiredAddr)), + CGF.getContext().VoidPtrTy); + Args.add(RValue::get(llvm::ConstantInt::get( + CGF.IntTy, translateAtomicOrdering(Success))), + CGF.getContext().IntTy); + Args.add(RValue::get(llvm::ConstantInt::get( + CGF.IntTy, translateAtomicOrdering(Failure))), + CGF.getContext().IntTy); + auto SuccessFailureRVal = emitAtomicLibcall(CGF, "__atomic_compare_exchange", + CGF.getContext().BoolTy, Args); + + return SuccessFailureRVal.getScalarVal(); +} + +std::pair<RValue, llvm::Value *> AtomicInfo::EmitAtomicCompareExchange( + RValue Expected, RValue Desired, llvm::AtomicOrdering Success, + llvm::AtomicOrdering Failure, bool IsWeak) { + if (Failure >= Success) + // Don't assert on undefined behavior. + Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(Success); + + // Check whether we should use a library call. + if (shouldUseLibcall()) { + // Produce a source address. + auto *ExpectedAddr = materializeRValue(Expected); + auto *DesiredAddr = materializeRValue(Desired); + auto *Res = EmitAtomicCompareExchangeLibcall(ExpectedAddr, DesiredAddr, + Success, Failure); + return std::make_pair( + convertTempToRValue(ExpectedAddr, AggValueSlot::ignored(), + SourceLocation(), /*AsValue=*/false), + Res); + } + + // If we've got a scalar value of the right size, try to avoid going + // through memory. 
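EmitAtomicCompareExchange first clamps an over-strong failure ordering, then returns the {previous value, success flag} pair that LLVM's cmpxchg yields via the two extractvalues above; on the libcall path the previous value comes back through the expected-value buffer instead. std::atomic exposes the same contract:

#include <atomic>
#include <cstdio>

int main() {
    std::atomic<int> obj{42};
    int expected = 41;           // deliberately stale
    // Like the {old value, i1 success} pair of LLVM's cmpxchg: on
    // failure, 'expected' is overwritten with the value actually seen.
    bool ok = obj.compare_exchange_strong(
        expected, 99,
        std::memory_order_seq_cst,   // success ordering
        std::memory_order_acquire);  // failure ordering (must not be
                                     // stronger than success)
    std::printf("ok=%d old=%d\n", ok, expected); // ok=0 old=42
}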
+ auto *ExpectedVal = convertRValueToInt(Expected); + auto *DesiredVal = convertRValueToInt(Desired); + auto Res = EmitAtomicCompareExchangeOp(ExpectedVal, DesiredVal, Success, + Failure, IsWeak); + return std::make_pair( + ConvertIntToValueOrAtomic(Res.first, AggValueSlot::ignored(), + SourceLocation(), /*AsValue=*/false), + Res.second); +} + +static void +EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics, RValue OldRVal, + const llvm::function_ref<RValue(RValue)> &UpdateOp, + llvm::Value *DesiredAddr) { + llvm::Value *Ptr = nullptr; + LValue UpdateLVal; + RValue UpRVal; + LValue AtomicLVal = Atomics.getAtomicLValue(); + LValue DesiredLVal; + if (AtomicLVal.isSimple()) { + UpRVal = OldRVal; + DesiredLVal = + LValue::MakeAddr(DesiredAddr, AtomicLVal.getType(), + AtomicLVal.getAlignment(), CGF.CGM.getContext()); + } else { + // Build new lvalue for temp address + Ptr = Atomics.materializeRValue(OldRVal); + if (AtomicLVal.isBitField()) { + UpdateLVal = + LValue::MakeBitfield(Ptr, AtomicLVal.getBitFieldInfo(), + AtomicLVal.getType(), AtomicLVal.getAlignment()); + DesiredLVal = + LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(), + AtomicLVal.getType(), AtomicLVal.getAlignment()); + } else if (AtomicLVal.isVectorElt()) { + UpdateLVal = LValue::MakeVectorElt(Ptr, AtomicLVal.getVectorIdx(), + AtomicLVal.getType(), + AtomicLVal.getAlignment()); + DesiredLVal = LValue::MakeVectorElt( + DesiredAddr, AtomicLVal.getVectorIdx(), AtomicLVal.getType(), + AtomicLVal.getAlignment()); + } else { + assert(AtomicLVal.isExtVectorElt()); + UpdateLVal = LValue::MakeExtVectorElt(Ptr, AtomicLVal.getExtVectorElts(), + AtomicLVal.getType(), + AtomicLVal.getAlignment()); + DesiredLVal = LValue::MakeExtVectorElt( + DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(), + AtomicLVal.getAlignment()); + } + UpdateLVal.setTBAAInfo(AtomicLVal.getTBAAInfo()); + DesiredLVal.setTBAAInfo(AtomicLVal.getTBAAInfo()); + UpRVal = CGF.EmitLoadOfLValue(UpdateLVal, SourceLocation()); + } + // Store new value in the corresponding memory area + RValue NewRVal = UpdateOp(UpRVal); + if (NewRVal.isScalar()) { + CGF.EmitStoreThroughLValue(NewRVal, DesiredLVal); + } else { + assert(NewRVal.isComplex()); + CGF.EmitStoreOfComplex(NewRVal.getComplexVal(), DesiredLVal, + /*isInit=*/false); + } +} + +void AtomicInfo::EmitAtomicUpdateLibcall( + llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp, + bool IsVolatile) { + auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO); + + llvm::Value *ExpectedAddr = CreateTempAlloca(); + + EmitAtomicLoadLibcall(ExpectedAddr, AO, IsVolatile); + auto *ContBB = CGF.createBasicBlock("atomic_cont"); + auto *ExitBB = CGF.createBasicBlock("atomic_exit"); + CGF.EmitBlock(ContBB); + auto *DesiredAddr = CreateTempAlloca(); + if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) || + requiresMemSetZero( + getAtomicAddress()->getType()->getPointerElementType())) { + auto *OldVal = CGF.Builder.CreateAlignedLoad( + ExpectedAddr, getAtomicAlignment().getQuantity()); + CGF.Builder.CreateAlignedStore(OldVal, DesiredAddr, + getAtomicAlignment().getQuantity()); + } + auto OldRVal = convertTempToRValue(ExpectedAddr, AggValueSlot::ignored(), + SourceLocation(), /*AsValue=*/false); + EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, DesiredAddr); + auto *Res = + EmitAtomicCompareExchangeLibcall(ExpectedAddr, DesiredAddr, AO, Failure); + CGF.Builder.CreateCondBr(Res, ExitBB, ContBB); + CGF.EmitBlock(ExitBB, /*IsFinished=*/true); +} + +void 
AtomicInfo::EmitAtomicUpdateOp( + llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp, + bool IsVolatile) { + auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO); + + // Do the atomic load. + auto *OldVal = EmitAtomicLoadOp(AO, IsVolatile); + // For non-simple lvalues perform compare-and-swap procedure. + auto *ContBB = CGF.createBasicBlock("atomic_cont"); + auto *ExitBB = CGF.createBasicBlock("atomic_exit"); + auto *CurBB = CGF.Builder.GetInsertBlock(); + CGF.EmitBlock(ContBB); + llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(), + /*NumReservedValues=*/2); + PHI->addIncoming(OldVal, CurBB); + auto *NewAtomicAddr = CreateTempAlloca(); + auto *NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr); + if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) || + requiresMemSetZero( + getAtomicAddress()->getType()->getPointerElementType())) { + CGF.Builder.CreateAlignedStore(PHI, NewAtomicIntAddr, + getAtomicAlignment().getQuantity()); + } + auto OldRVal = ConvertIntToValueOrAtomic(PHI, AggValueSlot::ignored(), + SourceLocation(), /*AsValue=*/false); + EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, NewAtomicAddr); + auto *DesiredVal = CGF.Builder.CreateAlignedLoad( + NewAtomicIntAddr, getAtomicAlignment().getQuantity()); + // Try to write new value using cmpxchg operation + auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure); + PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock()); + CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB); + CGF.EmitBlock(ExitBB, /*IsFinished=*/true); +} + +static void EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics, + RValue UpdateRVal, llvm::Value *DesiredAddr) { + LValue AtomicLVal = Atomics.getAtomicLValue(); + LValue DesiredLVal; + // Build new lvalue for temp address + if (AtomicLVal.isBitField()) { + DesiredLVal = + LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(), + AtomicLVal.getType(), AtomicLVal.getAlignment()); + } else if (AtomicLVal.isVectorElt()) { + DesiredLVal = + LValue::MakeVectorElt(DesiredAddr, AtomicLVal.getVectorIdx(), + AtomicLVal.getType(), AtomicLVal.getAlignment()); + } else { + assert(AtomicLVal.isExtVectorElt()); + DesiredLVal = LValue::MakeExtVectorElt( + DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(), + AtomicLVal.getAlignment()); + } + DesiredLVal.setTBAAInfo(AtomicLVal.getTBAAInfo()); + // Store new value in the corresponding memory area + assert(UpdateRVal.isScalar()); + CGF.EmitStoreThroughLValue(UpdateRVal, DesiredLVal); +} + +void AtomicInfo::EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO, + RValue UpdateRVal, bool IsVolatile) { + auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO); + + llvm::Value *ExpectedAddr = CreateTempAlloca(); + + EmitAtomicLoadLibcall(ExpectedAddr, AO, IsVolatile); + auto *ContBB = CGF.createBasicBlock("atomic_cont"); + auto *ExitBB = CGF.createBasicBlock("atomic_exit"); + CGF.EmitBlock(ContBB); + auto *DesiredAddr = CreateTempAlloca(); + if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) || + requiresMemSetZero( + getAtomicAddress()->getType()->getPointerElementType())) { + auto *OldVal = CGF.Builder.CreateAlignedLoad( + ExpectedAddr, getAtomicAlignment().getQuantity()); + CGF.Builder.CreateAlignedStore(OldVal, DesiredAddr, + getAtomicAlignment().getQuantity()); + } + EmitAtomicUpdateValue(CGF, *this, UpdateRVal, DesiredAddr); + auto *Res = + EmitAtomicCompareExchangeLibcall(ExpectedAddr, DesiredAddr, AO, Failure); + 
CGF.Builder.CreateCondBr(Res, ExitBB, ContBB); + CGF.EmitBlock(ExitBB, /*IsFinished=*/true); +} + +void AtomicInfo::EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal, + bool IsVolatile) { + auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO); + + // Do the atomic load. + auto *OldVal = EmitAtomicLoadOp(AO, IsVolatile); + // For non-simple lvalues perform compare-and-swap procedure. + auto *ContBB = CGF.createBasicBlock("atomic_cont"); + auto *ExitBB = CGF.createBasicBlock("atomic_exit"); + auto *CurBB = CGF.Builder.GetInsertBlock(); + CGF.EmitBlock(ContBB); + llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(), + /*NumReservedValues=*/2); + PHI->addIncoming(OldVal, CurBB); + auto *NewAtomicAddr = CreateTempAlloca(); + auto *NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr); + if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) || + requiresMemSetZero( + getAtomicAddress()->getType()->getPointerElementType())) { + CGF.Builder.CreateAlignedStore(PHI, NewAtomicIntAddr, + getAtomicAlignment().getQuantity()); + } + EmitAtomicUpdateValue(CGF, *this, UpdateRVal, NewAtomicAddr); + auto *DesiredVal = CGF.Builder.CreateAlignedLoad( + NewAtomicIntAddr, getAtomicAlignment().getQuantity()); + // Try to write new value using cmpxchg operation + auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure); + PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock()); + CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB); + CGF.EmitBlock(ExitBB, /*IsFinished=*/true); +} + +void AtomicInfo::EmitAtomicUpdate( + llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp, + bool IsVolatile) { + if (shouldUseLibcall()) { + EmitAtomicUpdateLibcall(AO, UpdateOp, IsVolatile); + } else { + EmitAtomicUpdateOp(AO, UpdateOp, IsVolatile); + } +} + +void AtomicInfo::EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal, + bool IsVolatile) { + if (shouldUseLibcall()) { + EmitAtomicUpdateLibcall(AO, UpdateRVal, IsVolatile); + } else { + EmitAtomicUpdateOp(AO, UpdateRVal, IsVolatile); + } +} + +void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue lvalue, + bool isInit) { + bool IsVolatile = lvalue.isVolatileQualified(); + llvm::AtomicOrdering AO; + if (lvalue.getType()->isAtomicType()) { + AO = llvm::SequentiallyConsistent; + } else { + AO = llvm::Release; + IsVolatile = true; + } + return EmitAtomicStore(rvalue, lvalue, AO, IsVolatile, isInit); +} + /// Emit a store to an l-value of atomic type. /// /// Note that the r-value is expected to be an r-value *of the atomic /// type*; this means that for aggregate r-values, it should include /// storage for any padding that was necessary. -void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest, bool isInit) { +void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest, + llvm::AtomicOrdering AO, bool IsVolatile, + bool isInit) { // If this is an aggregate r-value, it should agree in type except // maybe for address-space qualification. assert(!rvalue.isAggregate() || @@ -1095,54 +1664,64 @@ void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest, bool isInit) { == dest.getAddress()->getType()->getPointerElementType()); AtomicInfo atomics(*this, dest); + LValue LVal = atomics.getAtomicLValue(); // If this is an initialization, just put the value there normally. 
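Both EmitAtomicUpdateOp overloads above emit the standard load/modify/compare-exchange retry loop, with a PHI feeding the value observed by a failed cmpxchg into the next iteration. The same loop in source form (a generic read-modify-write helper over std::atomic):

#include <atomic>
#include <cstdio>

// Apply an arbitrary update atomically, mirroring the
// atomic_cont/atomic_exit loop structure of EmitAtomicUpdateOp.
template <class T, class F>
T atomicUpdate(std::atomic<T> &obj, F update) {
    T oldVal = obj.load(std::memory_order_seq_cst);
    while (!obj.compare_exchange_weak(oldVal, update(oldVal),
                                      std::memory_order_seq_cst,
                                      std::memory_order_seq_cst)) {
        // On failure, oldVal holds the freshly observed value
        // (the PHI back-edge in the generated IR).
    }
    return oldVal;
}

int main() {
    std::atomic<int> counter{10};
    int prev = atomicUpdate(counter, [](int v) { return v * 3 + 1; });
    std::printf("prev=%d now=%d\n", prev, counter.load()); // prev=10 now=31
}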
- if (isInit) { - atomics.emitCopyIntoMemory(rvalue, dest); - return; - } + if (LVal.isSimple()) { + if (isInit) { + atomics.emitCopyIntoMemory(rvalue); + return; + } - // Check whether we should use a library call. - if (atomics.shouldUseLibcall()) { - // Produce a source address. - llvm::Value *srcAddr = atomics.materializeRValue(rvalue); + // Check whether we should use a library call. + if (atomics.shouldUseLibcall()) { + // Produce a source address. + llvm::Value *srcAddr = atomics.materializeRValue(rvalue); - // void __atomic_store(size_t size, void *mem, void *val, int order) - CallArgList args; - args.add(RValue::get(atomics.getAtomicSizeValue()), - getContext().getSizeType()); - args.add(RValue::get(EmitCastToVoidPtr(dest.getAddress())), - getContext().VoidPtrTy); - args.add(RValue::get(EmitCastToVoidPtr(srcAddr)), - getContext().VoidPtrTy); - args.add(RValue::get(llvm::ConstantInt::get( - IntTy, AtomicExpr::AO_ABI_memory_order_seq_cst)), - getContext().IntTy); - emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args); + // void __atomic_store(size_t size, void *mem, void *val, int order) + CallArgList args; + args.add(RValue::get(atomics.getAtomicSizeValue()), + getContext().getSizeType()); + args.add(RValue::get(EmitCastToVoidPtr(atomics.getAtomicAddress())), + getContext().VoidPtrTy); + args.add(RValue::get(EmitCastToVoidPtr(srcAddr)), getContext().VoidPtrTy); + args.add(RValue::get(llvm::ConstantInt::get( + IntTy, AtomicInfo::translateAtomicOrdering(AO))), + getContext().IntTy); + emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args); + return; + } + + // Okay, we're doing this natively. + llvm::Value *intValue = atomics.convertRValueToInt(rvalue); + + // Do the atomic store. + llvm::Value *addr = + atomics.emitCastToAtomicIntPointer(atomics.getAtomicAddress()); + intValue = Builder.CreateIntCast( + intValue, addr->getType()->getPointerElementType(), /*isSigned=*/false); + llvm::StoreInst *store = Builder.CreateStore(intValue, addr); + + // Initializations don't need to be atomic. + if (!isInit) + store->setAtomic(AO); + + // Other decoration. + store->setAlignment(dest.getAlignment().getQuantity()); + if (IsVolatile) + store->setVolatile(true); + if (dest.getTBAAInfo()) + CGM.DecorateInstruction(store, dest.getTBAAInfo()); return; } - // Okay, we're doing this natively. - llvm::Value *intValue = atomics.convertRValueToInt(rvalue); - - // Do the atomic store. - llvm::Value *addr = atomics.emitCastToAtomicIntPointer(dest.getAddress()); - llvm::StoreInst *store = Builder.CreateStore(intValue, addr); - - // Initializations don't need to be atomic. - if (!isInit) store->setAtomic(llvm::SequentiallyConsistent); - - // Other decoration. - store->setAlignment(dest.getAlignment().getQuantity()); - if (dest.isVolatileQualified()) - store->setVolatile(true); - if (dest.getTBAAInfo()) - CGM.DecorateInstruction(store, dest.getTBAAInfo()); + // Emit simple atomic update operation. + atomics.EmitAtomicUpdate(AO, rvalue, IsVolatile); } /// Emit a compare-and-exchange op for atomic type. 
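EmitAtomicStore now chooses its ordering the same way as the load path: seq_cst for an _Atomic type, release (plus forced volatility) for a /volatile:ms access, while non-simple lvalues are routed through the compare-exchange update path instead of a plain store. The two store flavors in std::atomic terms:

#include <atomic>

std::atomic<int> flag{0};

void storeAtomicType(int v) {
    flag.store(v);                            // seq_cst, like _Atomic
}

void storeMsVolatileStyle(int v) {
    flag.store(v, std::memory_order_release); // release, /volatile:ms
}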
/// -std::pair<RValue, RValue> CodeGenFunction::EmitAtomicCompareExchange( +std::pair<RValue, llvm::Value *> CodeGenFunction::EmitAtomicCompareExchange( LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc, llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak, AggValueSlot Slot) { @@ -1156,56 +1735,15 @@ std::pair<RValue, RValue> CodeGenFunction::EmitAtomicCompareExchange( Obj.getAddress()->getType()->getPointerElementType()); AtomicInfo Atomics(*this, Obj); - if (Failure >= Success) - // Don't assert on undefined behavior. - Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(Success); - - auto Alignment = Atomics.getValueAlignment(); - // Check whether we should use a library call. - if (Atomics.shouldUseLibcall()) { - auto *ExpectedAddr = Atomics.materializeRValue(Expected); - // Produce a source address. - auto *DesiredAddr = Atomics.materializeRValue(Desired); - // bool __atomic_compare_exchange(size_t size, void *obj, void *expected, - // void *desired, int success, int failure); - CallArgList Args; - Args.add(RValue::get(Atomics.getAtomicSizeValue()), - getContext().getSizeType()); - Args.add(RValue::get(EmitCastToVoidPtr(Obj.getAddress())), - getContext().VoidPtrTy); - Args.add(RValue::get(EmitCastToVoidPtr(ExpectedAddr)), - getContext().VoidPtrTy); - Args.add(RValue::get(EmitCastToVoidPtr(DesiredAddr)), - getContext().VoidPtrTy); - Args.add(RValue::get(llvm::ConstantInt::get(IntTy, Success)), - getContext().IntTy); - Args.add(RValue::get(llvm::ConstantInt::get(IntTy, Failure)), - getContext().IntTy); - auto SuccessFailureRVal = emitAtomicLibcall( - *this, "__atomic_compare_exchange", getContext().BoolTy, Args); - auto *PreviousVal = - Builder.CreateAlignedLoad(ExpectedAddr, Alignment.getQuantity()); - return std::make_pair(RValue::get(PreviousVal), SuccessFailureRVal); - } - - // If we've got a scalar value of the right size, try to avoid going - // through memory. - auto *ExpectedIntVal = Atomics.convertRValueToInt(Expected); - auto *DesiredIntVal = Atomics.convertRValueToInt(Desired); - - // Do the atomic store. - auto *Addr = Atomics.emitCastToAtomicIntPointer(Obj.getAddress()); - auto *Inst = Builder.CreateAtomicCmpXchg(Addr, ExpectedIntVal, DesiredIntVal, - Success, Failure); - // Other decoration. - Inst->setVolatile(Obj.isVolatileQualified()); - Inst->setWeak(IsWeak); + return Atomics.EmitAtomicCompareExchange(Expected, Desired, Success, Failure, + IsWeak); +} - // Okay, turn that back into the original value type. 
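Note: the return type changes from std::pair<RValue, RValue> to std::pair<RValue, llvm::Value *>, exposing the raw i1 success flag, and both the libcall and native cmpxchg paths move into AtomicInfo::EmitAtomicCompareExchange. For reference, the generic libcall contract being removed from this function (and re-emitted inside AtomicInfo) is:

    #include <cstddef>
    extern "C" bool __atomic_compare_exchange(size_t size, void *obj,
                                              void *expected, void *desired,
                                              int success_order,
                                              int failure_order);
    // On failure the observed value is written back through 'expected',
    // which is why the old code reloaded ExpectedAddr to produce the
    // previous-value result.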
- auto *PreviousVal = Builder.CreateExtractValue(Inst, /*Idxs=*/0); - auto *SuccessFailureVal = Builder.CreateExtractValue(Inst, /*Idxs=*/1); - return std::make_pair(Atomics.convertIntToValue(PreviousVal, Slot, Loc), - RValue::get(SuccessFailureVal)); +void CodeGenFunction::EmitAtomicUpdate( + LValue LVal, llvm::AtomicOrdering AO, + const llvm::function_ref<RValue(RValue)> &UpdateOp, bool IsVolatile) { + AtomicInfo Atomics(*this, LVal); + Atomics.EmitAtomicUpdate(AO, UpdateOp, IsVolatile); } void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) { @@ -1214,13 +1752,13 @@ void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) { switch (atomics.getEvaluationKind()) { case TEK_Scalar: { llvm::Value *value = EmitScalarExpr(init); - atomics.emitCopyIntoMemory(RValue::get(value), dest); + atomics.emitCopyIntoMemory(RValue::get(value)); return; } case TEK_Complex: { ComplexPairTy value = EmitComplexExpr(init); - atomics.emitCopyIntoMemory(RValue::getComplex(value), dest); + atomics.emitCopyIntoMemory(RValue::getComplex(value)); return; } @@ -1229,8 +1767,8 @@ void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) { // of atomic type. bool Zeroed = false; if (!init->getType()->isAtomicType()) { - Zeroed = atomics.emitMemSetZeroIfNecessary(dest); - dest = atomics.projectValue(dest); + Zeroed = atomics.emitMemSetZeroIfNecessary(); + dest = atomics.projectValue(); } // Evaluate the expression directly into the destination. diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.cpp index b98460a..3fd344c 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.cpp +++ b/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.cpp @@ -621,8 +621,8 @@ static void enterBlockScope(CodeGenFunction &CGF, BlockDecl *block) { } // GEP down to the address. - llvm::Value *addr = CGF.Builder.CreateStructGEP(blockInfo.Address, - capture.getIndex()); + llvm::Value *addr = CGF.Builder.CreateStructGEP( + blockInfo.StructureType, blockInfo.Address, capture.getIndex()); // We can use that GEP as the dominating IP. if (!blockInfo.DominatingIP) @@ -721,6 +721,7 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) { // Build the block descriptor. llvm::Constant *descriptor = buildBlockDescriptor(CGM, blockInfo); + llvm::Type *blockTy = blockInfo.StructureType; llvm::AllocaInst *blockAddr = blockInfo.Address; assert(blockAddr && "block has no address!"); @@ -732,14 +733,17 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) { if (blockInfo.UsesStret) flags |= BLOCK_USE_STRET; // Initialize the block literal. 
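Note: from here on, every CreateStructGEP call gains a leading llvm::Type * naming the struct being indexed. A minimal before/after sketch, assuming the blockTy and blockAddr names used in this function:

    // Before: the struct type was inferred from the pointer operand.
    //   Builder.CreateStructGEP(blockAddr, 0, "block.isa");
    // After: the pointee type is spelled out, so the GEP no longer
    // depends on blockAddr carrying a typed pointer.
    Builder.CreateStructGEP(blockTy, blockAddr, 0, "block.isa");

This is a mechanical migration toward explicit pointee types in the LLVM builder API; the computed address is unchanged.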
- Builder.CreateStore(isa, Builder.CreateStructGEP(blockAddr, 0, "block.isa")); - Builder.CreateStore(llvm::ConstantInt::get(IntTy, flags.getBitMask()), - Builder.CreateStructGEP(blockAddr, 1, "block.flags")); - Builder.CreateStore(llvm::ConstantInt::get(IntTy, 0), - Builder.CreateStructGEP(blockAddr, 2, "block.reserved")); - Builder.CreateStore(blockFn, Builder.CreateStructGEP(blockAddr, 3, - "block.invoke")); - Builder.CreateStore(descriptor, Builder.CreateStructGEP(blockAddr, 4, + Builder.CreateStore( + isa, Builder.CreateStructGEP(blockTy, blockAddr, 0, "block.isa")); + Builder.CreateStore( + llvm::ConstantInt::get(IntTy, flags.getBitMask()), + Builder.CreateStructGEP(blockTy, blockAddr, 1, "block.flags")); + Builder.CreateStore( + llvm::ConstantInt::get(IntTy, 0), + Builder.CreateStructGEP(blockTy, blockAddr, 2, "block.reserved")); + Builder.CreateStore( + blockFn, Builder.CreateStructGEP(blockTy, blockAddr, 3, "block.invoke")); + Builder.CreateStore(descriptor, Builder.CreateStructGEP(blockTy, blockAddr, 4, "block.descriptor")); // Finally, capture all the values into the block. @@ -747,9 +751,8 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) { // First, 'this'. if (blockDecl->capturesCXXThis()) { - llvm::Value *addr = Builder.CreateStructGEP(blockAddr, - blockInfo.CXXThisIndex, - "block.captured-this.addr"); + llvm::Value *addr = Builder.CreateStructGEP( + blockTy, blockAddr, blockInfo.CXXThisIndex, "block.captured-this.addr"); Builder.CreateStore(LoadCXXThis(), addr); } @@ -766,9 +769,8 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) { // This will be a [[type]]*, except that a byref entry will just be // an i8**. - llvm::Value *blockField = - Builder.CreateStructGEP(blockAddr, capture.getIndex(), - "block.captured"); + llvm::Value *blockField = Builder.CreateStructGEP( + blockTy, blockAddr, capture.getIndex(), "block.captured"); // Compute the address of the thing we're going to move into the // block literal. @@ -779,7 +781,7 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) { BlockInfo->getCapture(variable); // This is a [[type]]*, except that a byref entry wil just be an i8**. - src = Builder.CreateStructGEP(LoadBlockStruct(), + src = Builder.CreateStructGEP(BlockInfo->StructureType, LoadBlockStruct(), enclosingCapture.getIndex(), "block.capture.addr"); } else if (blockDecl->isConversionFromLambda()) { @@ -964,7 +966,8 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr *E, Builder.CreateBitCast(Callee, BlockLiteralTy, "block.literal"); // Get the function pointer from the literal. - llvm::Value *FuncPtr = Builder.CreateStructGEP(BlockLiteral, 3); + llvm::Value *FuncPtr = Builder.CreateStructGEP( + CGM.getGenericBlockLiteralType(), BlockLiteral, 3); BlockLiteral = Builder.CreateBitCast(BlockLiteral, VoidPtrTy); @@ -1004,26 +1007,27 @@ llvm::Value *CodeGenFunction::GetAddrOfBlockDecl(const VarDecl *variable, if (capture.isConstant()) return LocalDeclMap[variable]; llvm::Value *addr = - Builder.CreateStructGEP(LoadBlockStruct(), capture.getIndex(), - "block.capture.addr"); + Builder.CreateStructGEP(BlockInfo->StructureType, LoadBlockStruct(), + capture.getIndex(), "block.capture.addr"); if (isByRef) { // addr should be a void** right now. Load, then cast the result // to byref*. 
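Note: the load/bitcast/GEP sequence below follows the __block variable's forwarding pointer before indexing the actual value. A rough C-level sketch of the byref record being walked (field order per the Blocks ABI; the helper fields exist only when copy/dispose support is needed, and int stands in for the captured variable's type):

    struct byref_demo {               // illustrative layout only
      void *isa;
      struct byref_demo *forwarding;  // self, or the heap copy after _Block_copy
      int flags;
      int size;
      /* optional copy/dispose helpers, optional extended layout */
      int value;                      // the captured variable
    };

Accesses always go through forwarding, so stack and heap copies of the variable observe the same storage.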
addr = Builder.CreateLoad(addr); - llvm::PointerType *byrefPointerType - = llvm::PointerType::get(BuildByRefType(variable), 0); + auto *byrefType = BuildByRefType(variable); + llvm::PointerType *byrefPointerType = llvm::PointerType::get(byrefType, 0); addr = Builder.CreateBitCast(addr, byrefPointerType, "byref.addr"); // Follow the forwarding pointer. - addr = Builder.CreateStructGEP(addr, 1, "byref.forwarding"); + addr = Builder.CreateStructGEP(byrefType, addr, 1, "byref.forwarding"); addr = Builder.CreateLoad(addr, "byref.addr.forwarded"); // Cast back to byref* and GEP over to the actual object. addr = Builder.CreateBitCast(addr, byrefPointerType); - addr = Builder.CreateStructGEP(addr, getByRefValueLLVMField(variable), + addr = Builder.CreateStructGEP(byrefType, addr, + getByRefValueLLVMField(variable).second, variable->getNameAsString()); } @@ -1136,8 +1140,7 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD, args.push_back(&selfDecl); // Now add the rest of the parameters. - for (auto i : blockDecl->params()) - args.push_back(i); + args.append(blockDecl->param_begin(), blockDecl->param_end()); // Create the function declaration. const FunctionProtoType *fnType = blockInfo.getBlockExpr()->getFunctionType(); @@ -1178,7 +1181,7 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD, Alloca->setAlignment(Align); // Set the DebugLocation to empty, so the store is recognized as a // frame setup instruction by llvm::DwarfDebug::beginFunction(). - ApplyDebugLocation NL(*this); + auto NL = ApplyDebugLocation::CreateEmpty(*this); Builder.CreateAlignedStore(BlockPointer, Alloca, Align); BlockPointerDbgLoc = Alloca; } @@ -1186,9 +1189,9 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD, // If we have a C++ 'this' reference, go ahead and force it into // existence now. if (blockDecl->capturesCXXThis()) { - llvm::Value *addr = Builder.CreateStructGEP(BlockPointer, - blockInfo.CXXThisIndex, - "block.captured-this"); + llvm::Value *addr = + Builder.CreateStructGEP(blockInfo.StructureType, BlockPointer, + blockInfo.CXXThisIndex, "block.captured-this"); CXXThisValue = Builder.CreateLoad(addr, "this"); } @@ -1218,8 +1221,7 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD, EmitLambdaBlockInvokeBody(); else { PGO.assignRegionCounters(blockDecl, fn); - RegionCounter Cnt = getPGORegionCounter(blockDecl->getBody()); - Cnt.beginRegion(Builder); + incrementProfileCounter(blockDecl->getBody()); EmitStmt(blockDecl->getBody()); } @@ -1328,11 +1330,10 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) { nullptr, SC_Static, false, false); - // Create a scope with an artificial location for the body of this function. - ApplyDebugLocation NL(*this); + auto NL = ApplyDebugLocation::CreateEmpty(*this); StartFunction(FD, C.VoidTy, Fn, FI, args); - ArtificialLocation AL(*this); - + // Create a scope with an artificial location for the body of this function. 
+ auto AL = ApplyDebugLocation::CreateArtificial(*this); llvm::Type *structPtrTy = blockInfo.StructureType->getPointerTo(); llvm::Value *src = GetAddrOfLocalVar(&srcDecl); @@ -1404,8 +1405,10 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) { } unsigned index = capture.getIndex(); - llvm::Value *srcField = Builder.CreateStructGEP(src, index); - llvm::Value *dstField = Builder.CreateStructGEP(dst, index); + llvm::Value *srcField = + Builder.CreateStructGEP(blockInfo.StructureType, src, index); + llvm::Value *dstField = + Builder.CreateStructGEP(blockInfo.StructureType, dst, index); // If there's an explicit copy expression, we do that. if (copyExpr) { @@ -1500,9 +1503,9 @@ CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) { nullptr, SC_Static, false, false); // Create a scope with an artificial location for the body of this function. - ApplyDebugLocation NL(*this); + auto NL = ApplyDebugLocation::CreateEmpty(*this); StartFunction(FD, C.VoidTy, Fn, FI, args); - ArtificialLocation AL(*this); + auto AL = ApplyDebugLocation::CreateArtificial(*this); llvm::Type *structPtrTy = blockInfo.StructureType->getPointerTo(); @@ -1562,7 +1565,8 @@ CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) { } unsigned index = capture.getIndex(); - llvm::Value *srcField = Builder.CreateStructGEP(src, index); + llvm::Value *srcField = + Builder.CreateStructGEP(blockInfo.StructureType, src, index); // If there's an explicit copy expression, we do that. if (dtor) { @@ -1801,13 +1805,15 @@ generateByrefCopyHelper(CodeGenFunction &CGF, llvm::Value *destField = CGF.GetAddrOfLocalVar(&dst); destField = CGF.Builder.CreateLoad(destField); destField = CGF.Builder.CreateBitCast(destField, byrefPtrType); - destField = CGF.Builder.CreateStructGEP(destField, valueFieldIndex, "x"); + destField = CGF.Builder.CreateStructGEP(&byrefType, destField, + valueFieldIndex, "x"); // src->x llvm::Value *srcField = CGF.GetAddrOfLocalVar(&src); srcField = CGF.Builder.CreateLoad(srcField); srcField = CGF.Builder.CreateBitCast(srcField, byrefPtrType); - srcField = CGF.Builder.CreateStructGEP(srcField, valueFieldIndex, "x"); + srcField = + CGF.Builder.CreateStructGEP(&byrefType, srcField, valueFieldIndex, "x"); byrefInfo.emitCopy(CGF, destField, srcField); } @@ -1868,7 +1874,7 @@ generateByrefDisposeHelper(CodeGenFunction &CGF, llvm::Value *V = CGF.GetAddrOfLocalVar(&src); V = CGF.Builder.CreateLoad(V); V = CGF.Builder.CreateBitCast(V, byrefType.getPointerTo(0)); - V = CGF.Builder.CreateStructGEP(V, byrefValueIndex, "x"); + V = CGF.Builder.CreateStructGEP(&byrefType, V, byrefValueIndex, "x"); byrefInfo.emitDispose(CGF, V); } @@ -1925,7 +1931,7 @@ CodeGenFunction::buildByrefHelpers(llvm::StructType &byrefType, const VarDecl &var = *emission.Variable; QualType type = var.getType(); - unsigned byrefValueIndex = getByRefValueLLVMField(&var); + unsigned byrefValueIndex = getByRefValueLLVMField(&var).second; if (const CXXRecordDecl *record = type->getAsCXXRecordDecl()) { const Expr *copyExpr = CGM.getContext().getBlockVarCopyInits(&var); @@ -1995,18 +2001,20 @@ CodeGenFunction::buildByrefHelpers(llvm::StructType &byrefType, return ::buildByrefHelpers(CGM, byrefType, byrefValueIndex, byrefInfo); } -unsigned CodeGenFunction::getByRefValueLLVMField(const ValueDecl *VD) const { +std::pair<llvm::Type *, unsigned> +CodeGenFunction::getByRefValueLLVMField(const ValueDecl *VD) const { assert(ByRefValueInfo.count(VD) && "Did not find value!"); - - return 
ByRefValueInfo.find(VD)->second.second; + + return ByRefValueInfo.find(VD)->second; } llvm::Value *CodeGenFunction::BuildBlockByrefAddress(llvm::Value *BaseAddr, const VarDecl *V) { - llvm::Value *Loc = Builder.CreateStructGEP(BaseAddr, 1, "forwarding"); + auto P = getByRefValueLLVMField(V); + llvm::Value *Loc = + Builder.CreateStructGEP(P.first, BaseAddr, 1, "forwarding"); Loc = Builder.CreateLoad(Loc); - Loc = Builder.CreateStructGEP(Loc, getByRefValueLLVMField(V), - V->getNameAsString()); + Loc = Builder.CreateStructGEP(P.first, Loc, P.second, V->getNameAsString()); return Loc; } @@ -2143,11 +2151,12 @@ void CodeGenFunction::emitByrefStructureInit(const AutoVarEmission &emission) { if (type.isObjCGCWeak()) isa = 1; V = Builder.CreateIntToPtr(Builder.getInt32(isa), Int8PtrTy, "isa"); - Builder.CreateStore(V, Builder.CreateStructGEP(addr, 0, "byref.isa")); + Builder.CreateStore(V, + Builder.CreateStructGEP(nullptr, addr, 0, "byref.isa")); // Store the address of the variable into its own forwarding pointer. - Builder.CreateStore(addr, - Builder.CreateStructGEP(addr, 1, "byref.forwarding")); + Builder.CreateStore( + addr, Builder.CreateStructGEP(nullptr, addr, 1, "byref.forwarding")); // Blocks ABI: // c) the flags field is set to either 0 if no helper functions are @@ -2193,25 +2202,26 @@ void CodeGenFunction::emitByrefStructureInit(const AutoVarEmission &emission) { printf("\n"); } } - + Builder.CreateStore(llvm::ConstantInt::get(IntTy, flags.getBitMask()), - Builder.CreateStructGEP(addr, 2, "byref.flags")); + Builder.CreateStructGEP(nullptr, addr, 2, "byref.flags")); CharUnits byrefSize = CGM.GetTargetTypeStoreSize(byrefType); V = llvm::ConstantInt::get(IntTy, byrefSize.getQuantity()); - Builder.CreateStore(V, Builder.CreateStructGEP(addr, 3, "byref.size")); + Builder.CreateStore(V, + Builder.CreateStructGEP(nullptr, addr, 3, "byref.size")); if (helpers) { - llvm::Value *copy_helper = Builder.CreateStructGEP(addr, 4); + llvm::Value *copy_helper = Builder.CreateStructGEP(nullptr, addr, 4); Builder.CreateStore(helpers->CopyHelper, copy_helper); - llvm::Value *destroy_helper = Builder.CreateStructGEP(addr, 5); + llvm::Value *destroy_helper = Builder.CreateStructGEP(nullptr, addr, 5); Builder.CreateStore(helpers->DisposeHelper, destroy_helper); } if (ByRefHasLifetime && HasByrefExtendedLayout) { llvm::Constant* ByrefLayoutInfo = CGM.getObjCRuntime().BuildByrefLayout(CGM, type); - llvm::Value *ByrefInfoAddr = Builder.CreateStructGEP(addr, helpers ? 6 : 4, - "byref.layout"); + llvm::Value *ByrefInfoAddr = + Builder.CreateStructGEP(nullptr, addr, helpers ? 6 : 4, "byref.layout"); // cast destination to pointer to source type. 
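Note: getByRefValueLLVMField now returns the byref struct type together with the field index, so callers can feed both straight into the typed CreateStructGEP, as BuildBlockByrefAddress does above. The nullptr-typed CreateStructGEP calls in emitByrefStructureInit rely on the builder still deriving the struct type from the pointer during this transition. A small usage sketch with the names from the patch:

    auto P = getByRefValueLLVMField(V);  // {byref struct type, field index}
    llvm::Value *Loc =
        Builder.CreateStructGEP(P.first, BaseAddr, 1, "forwarding");
    Loc = Builder.CreateLoad(Loc);
    Loc = Builder.CreateStructGEP(P.first, Loc, P.second,
                                  V->getNameAsString());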
llvm::Type *DesTy = ByrefLayoutInfo->getType(); DesTy = DesTy->getPointerTo(); diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGBuilder.h b/contrib/llvm/tools/clang/lib/CodeGen/CGBuilder.h index 72ba4fa..6610659 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CGBuilder.h +++ b/contrib/llvm/tools/clang/lib/CodeGen/CGBuilder.h @@ -33,7 +33,7 @@ protected: llvm::BasicBlock *BB, llvm::BasicBlock::iterator InsertPt) const; private: - void operator=(const CGBuilderInserter &) LLVM_DELETED_FUNCTION; + void operator=(const CGBuilderInserter &) = delete; CodeGenFunction *CGF; }; diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp index 635e342..272baac 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp +++ b/contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp @@ -12,6 +12,7 @@ //===----------------------------------------------------------------------===// #include "CodeGenFunction.h" +#include "CGCXXABI.h" #include "CGObjCRuntime.h" #include "CodeGenModule.h" #include "TargetInfo.h" @@ -21,9 +22,11 @@ #include "clang/Basic/TargetInfo.h" #include "clang/CodeGen/CGFunctionInfo.h" #include "llvm/ADT/StringExtras.h" +#include "llvm/IR/CallSite.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/InlineAsm.h" #include "llvm/IR/Intrinsics.h" +#include <sstream> using namespace clang; using namespace CodeGen; @@ -156,6 +159,27 @@ static Value *EmitFAbs(CodeGenFunction &CGF, Value *V) { return Call; } +/// Emit the computation of the sign bit for a floating point value. Returns +/// the i1 sign bit value. +static Value *EmitSignBit(CodeGenFunction &CGF, Value *V) { + LLVMContext &C = CGF.CGM.getLLVMContext(); + + llvm::Type *Ty = V->getType(); + int Width = Ty->getPrimitiveSizeInBits(); + llvm::Type *IntTy = llvm::IntegerType::get(C, Width); + V = CGF.Builder.CreateBitCast(V, IntTy); + if (Ty->isPPC_FP128Ty()) { + // The higher-order double comes first, and so we need to truncate the + // pair to extract the overall sign. The order of the pair is the same + // in both little- and big-Endian modes. 
+ Width >>= 1; + IntTy = llvm::IntegerType::get(C, Width); + V = CGF.Builder.CreateTrunc(V, IntTy); + } + Value *Zero = llvm::Constant::getNullValue(IntTy); + return CGF.Builder.CreateICmpSLT(V, Zero); +} + static RValue emitLibraryCall(CodeGenFunction &CGF, const FunctionDecl *Fn, const CallExpr *E, llvm::Value *calleeValue) { return CGF.EmitCall(E->getCallee()->getType(), calleeValue, E, @@ -181,7 +205,7 @@ static llvm::Value *EmitOverflowIntrinsic(CodeGenFunction &CGF, "arguments have the same integer width?)"); llvm::Value *Callee = CGF.CGM.getIntrinsic(IntrinsicID, X->getType()); - llvm::Value *Tmp = CGF.Builder.CreateCall2(Callee, X, Y); + llvm::Value *Tmp = CGF.Builder.CreateCall(Callee, {X, Y}); Carry = CGF.Builder.CreateExtractValue(Tmp, 1); return CGF.Builder.CreateExtractValue(Tmp, 0); } @@ -230,8 +254,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD, DstPtr = Builder.CreateBitCast(DstPtr, Type); SrcPtr = Builder.CreateBitCast(SrcPtr, Type); - return RValue::get(Builder.CreateCall2(CGM.getIntrinsic(Intrinsic::vacopy), - DstPtr, SrcPtr)); + return RValue::get(Builder.CreateCall(CGM.getIntrinsic(Intrinsic::vacopy), + {DstPtr, SrcPtr})); } case Builtin::BI__builtin_abs: case Builtin::BI__builtin_labs: @@ -309,7 +333,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD, llvm::Type *ResultType = ConvertType(E->getType()); Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef()); - Value *Result = Builder.CreateCall2(F, ArgValue, ZeroUndef); + Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef}); if (Result->getType() != ResultType) Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true, "cast"); @@ -326,7 +350,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD, llvm::Type *ResultType = ConvertType(E->getType()); Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef()); - Value *Result = Builder.CreateCall2(F, ArgValue, ZeroUndef); + Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef}); if (Result->getType() != ResultType) Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true, "cast"); @@ -342,9 +366,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD, Value *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType); llvm::Type *ResultType = ConvertType(E->getType()); - Value *Tmp = Builder.CreateAdd(Builder.CreateCall2(F, ArgValue, - Builder.getTrue()), - llvm::ConstantInt::get(ArgType, 1)); + Value *Tmp = + Builder.CreateAdd(Builder.CreateCall(F, {ArgValue, Builder.getTrue()}), + llvm::ConstantInt::get(ArgType, 1)); Value *Zero = llvm::Constant::getNullValue(ArgType); Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero"); Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs"); @@ -389,11 +413,16 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD, Value *ArgValue = EmitScalarExpr(E->getArg(0)); llvm::Type *ArgType = ArgValue->getType(); - Value *FnExpect = CGM.getIntrinsic(Intrinsic::expect, ArgType); Value *ExpectedValue = EmitScalarExpr(E->getArg(1)); + // Don't generate llvm.expect on -O0 as the backend won't use it for + // anything. + // Note, we still IRGen ExpectedValue because it could have side-effects. 
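Note: suppressing llvm.expect at -O0 drops a hint the backend would not consume anyway, while EmitScalarExpr on ExpectedValue keeps any side effects of the hint expression. Source-level behaviour is unchanged; a small illustrative example (names are placeholders):

    int demo(int *p) {
      if (__builtin_expect(p != nullptr, 1)) {  // -O0: just the comparison
        return *p;                              // -O1+: wrapped in llvm.expect
      }                                         //   to bias block placement
      return 0;
    }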
+ if (CGM.getCodeGenOpts().OptimizationLevel == 0) + return RValue::get(ArgValue); - Value *Result = Builder.CreateCall2(FnExpect, ArgValue, ExpectedValue, - "expval"); + Value *FnExpect = CGM.getIntrinsic(Intrinsic::expect, ArgType); + Value *Result = + Builder.CreateCall(FnExpect, {ArgValue, ExpectedValue}, "expval"); return RValue::get(Result); } case Builtin::BI__builtin_assume_aligned: { @@ -444,7 +473,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD, // FIXME: Get right address space. llvm::Type *Tys[] = { ResType, Builder.getInt8PtrTy(0) }; Value *F = CGM.getIntrinsic(Intrinsic::objectsize, Tys); - return RValue::get(Builder.CreateCall2(F, EmitScalarExpr(E->getArg(0)),CI)); + return RValue::get( + Builder.CreateCall(F, {EmitScalarExpr(E->getArg(0)), CI})); } case Builtin::BI__builtin_prefetch: { Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0)); @@ -455,25 +485,25 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD, llvm::ConstantInt::get(Int32Ty, 3); Value *Data = llvm::ConstantInt::get(Int32Ty, 1); Value *F = CGM.getIntrinsic(Intrinsic::prefetch); - return RValue::get(Builder.CreateCall4(F, Address, RW, Locality, Data)); + return RValue::get(Builder.CreateCall(F, {Address, RW, Locality, Data})); } case Builtin::BI__builtin_readcyclecounter: { Value *F = CGM.getIntrinsic(Intrinsic::readcyclecounter); - return RValue::get(Builder.CreateCall(F)); + return RValue::get(Builder.CreateCall(F, {})); } case Builtin::BI__builtin___clear_cache: { Value *Begin = EmitScalarExpr(E->getArg(0)); Value *End = EmitScalarExpr(E->getArg(1)); Value *F = CGM.getIntrinsic(Intrinsic::clear_cache); - return RValue::get(Builder.CreateCall2(F, Begin, End)); + return RValue::get(Builder.CreateCall(F, {Begin, End})); } case Builtin::BI__builtin_trap: { Value *F = CGM.getIntrinsic(Intrinsic::trap); - return RValue::get(Builder.CreateCall(F)); + return RValue::get(Builder.CreateCall(F, {})); } case Builtin::BI__debugbreak: { Value *F = CGM.getIntrinsic(Intrinsic::debugtrap); - return RValue::get(Builder.CreateCall(F)); + return RValue::get(Builder.CreateCall(F, {})); } case Builtin::BI__builtin_unreachable: { if (SanOpts.has(SanitizerKind::Unreachable)) { @@ -498,7 +528,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD, Value *Exponent = EmitScalarExpr(E->getArg(1)); llvm::Type *ArgType = Base->getType(); Value *F = CGM.getIntrinsic(Intrinsic::powi, ArgType); - return RValue::get(Builder.CreateCall2(F, Base, Exponent)); + return RValue::get(Builder.CreateCall(F, {Base, Exponent})); } case Builtin::BI__builtin_isgreater: @@ -551,8 +581,22 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD, return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType()))); } - // TODO: BI__builtin_isinf_sign - // isinf_sign(x) -> isinf(x) ? (signbit(x) ? -1 : 1) : 0 + case Builtin::BI__builtin_isinf_sign: { + // isinf_sign(x) -> fabs(x) == infinity ? (signbit(x) ? 
-1 : 1) : 0 + Value *Arg = EmitScalarExpr(E->getArg(0)); + Value *AbsArg = EmitFAbs(*this, Arg); + Value *IsInf = Builder.CreateFCmpOEQ( + AbsArg, ConstantFP::getInfinity(Arg->getType()), "isinf"); + Value *IsNeg = EmitSignBit(*this, Arg); + + llvm::Type *IntTy = ConvertType(E->getType()); + Value *Zero = Constant::getNullValue(IntTy); + Value *One = ConstantInt::get(IntTy, 1); + Value *NegativeOne = ConstantInt::get(IntTy, -1); + Value *SignResult = Builder.CreateSelect(IsNeg, NegativeOne, One); + Value *Result = Builder.CreateSelect(IsInf, SignResult, Zero); + return RValue::get(Result); + } case Builtin::BI__builtin_isnormal: { // isnormal(x) --> x == x && fabsf(x) < infinity && fabsf(x) >= float_min @@ -815,7 +859,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD, Value *F = CGM.getIntrinsic(IntTy->getBitWidth() == 32 ? Intrinsic::eh_return_i32 : Intrinsic::eh_return_i64); - Builder.CreateCall2(F, Int, Ptr); + Builder.CreateCall(F, {Int, Ptr}); Builder.CreateUnreachable(); // We do need to preserve an insertion point. @@ -825,7 +869,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD, } case Builtin::BI__builtin_unwind_init: { Value *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init); - return RValue::get(Builder.CreateCall(F)); + return RValue::get(Builder.CreateCall(F, {})); } case Builtin::BI__builtin_extend_pointer: { // Extends a pointer to the size of an _Unwind_Word, which is @@ -864,7 +908,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD, // Store the stack pointer to the setjmp buffer. Value *StackAddr = - Builder.CreateCall(CGM.getIntrinsic(Intrinsic::stacksave)); + Builder.CreateCall(CGM.getIntrinsic(Intrinsic::stacksave), {}); Value *StackSaveSlot = Builder.CreateGEP(Buf, ConstantInt::get(Int32Ty, 2)); Builder.CreateStore(StackAddr, StackSaveSlot); @@ -1357,6 +1401,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD, return RValue::get(Builder.CreateCall(F, Arg0)); } + case Builtin::BI__builtin_pow: + case Builtin::BI__builtin_powf: + case Builtin::BI__builtin_powl: case Builtin::BIpow: case Builtin::BIpowf: case Builtin::BIpowl: { @@ -1367,7 +1414,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD, Value *Exponent = EmitScalarExpr(E->getArg(1)); llvm::Type *ArgType = Base->getType(); Value *F = CGM.getIntrinsic(Intrinsic::pow, ArgType); - return RValue::get(Builder.CreateCall2(F, Base, Exponent)); + return RValue::get(Builder.CreateCall(F, {Base, Exponent})); } case Builtin::BIfma: @@ -1380,32 +1427,17 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD, Value *FirstArg = EmitScalarExpr(E->getArg(0)); llvm::Type *ArgType = FirstArg->getType(); Value *F = CGM.getIntrinsic(Intrinsic::fma, ArgType); - return RValue::get(Builder.CreateCall3(F, FirstArg, - EmitScalarExpr(E->getArg(1)), - EmitScalarExpr(E->getArg(2)))); + return RValue::get( + Builder.CreateCall(F, {FirstArg, EmitScalarExpr(E->getArg(1)), + EmitScalarExpr(E->getArg(2))})); } case Builtin::BI__builtin_signbit: case Builtin::BI__builtin_signbitf: case Builtin::BI__builtin_signbitl: { - LLVMContext &C = CGM.getLLVMContext(); - - Value *Arg = EmitScalarExpr(E->getArg(0)); - llvm::Type *ArgTy = Arg->getType(); - int ArgWidth = ArgTy->getPrimitiveSizeInBits(); - llvm::Type *ArgIntTy = llvm::IntegerType::get(C, ArgWidth); - Value *BCArg = Builder.CreateBitCast(Arg, ArgIntTy); - if (ArgTy->isPPC_FP128Ty()) { - // The higher-order double comes first, and so we need to truncate the - // pair to extract the overall sign. 
The order of the pair is the same - // in both little- and big-Endian modes. - ArgWidth >>= 1; - ArgIntTy = llvm::IntegerType::get(C, ArgWidth); - BCArg = Builder.CreateTrunc(BCArg, ArgIntTy); - } - Value *ZeroCmp = llvm::Constant::getNullValue(ArgIntTy); - Value *Result = Builder.CreateICmpSLT(BCArg, ZeroCmp); - return RValue::get(Builder.CreateZExt(Result, ConvertType(E->getType()))); + return RValue::get( + Builder.CreateZExt(EmitSignBit(*this, EmitScalarExpr(E->getArg(0))), + ConvertType(E->getType()))); } case Builtin::BI__builtin_annotation: { llvm::Value *AnnVal = EmitScalarExpr(E->getArg(0)); @@ -1650,6 +1682,76 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD, Builder.CreateAlignedLoad(IntToPtr, /*Align=*/4, /*isVolatile=*/true); return RValue::get(Load); } + + case Builtin::BI__exception_code: + case Builtin::BI_exception_code: + return RValue::get(EmitSEHExceptionCode()); + case Builtin::BI__exception_info: + case Builtin::BI_exception_info: + return RValue::get(EmitSEHExceptionInfo()); + case Builtin::BI__abnormal_termination: + case Builtin::BI_abnormal_termination: + return RValue::get(EmitSEHAbnormalTermination()); + case Builtin::BI_setjmpex: { + if (getTarget().getTriple().isOSMSVCRT()) { + llvm::Type *ArgTypes[] = {Int8PtrTy, Int8PtrTy}; + llvm::AttributeSet ReturnsTwiceAttr = + AttributeSet::get(getLLVMContext(), llvm::AttributeSet::FunctionIndex, + llvm::Attribute::ReturnsTwice); + llvm::Constant *SetJmpEx = CGM.CreateRuntimeFunction( + llvm::FunctionType::get(IntTy, ArgTypes, /*isVarArg=*/false), + "_setjmpex", ReturnsTwiceAttr); + llvm::Value *Buf = Builder.CreateBitOrPointerCast( + EmitScalarExpr(E->getArg(0)), Int8PtrTy); + llvm::Value *FrameAddr = + Builder.CreateCall(CGM.getIntrinsic(Intrinsic::frameaddress), + ConstantInt::get(Int32Ty, 0)); + llvm::Value *Args[] = {Buf, FrameAddr}; + llvm::CallSite CS = EmitRuntimeCallOrInvoke(SetJmpEx, Args); + CS.setAttributes(ReturnsTwiceAttr); + return RValue::get(CS.getInstruction()); + } + break; + } + case Builtin::BI_setjmp: { + if (getTarget().getTriple().isOSMSVCRT()) { + llvm::AttributeSet ReturnsTwiceAttr = + AttributeSet::get(getLLVMContext(), llvm::AttributeSet::FunctionIndex, + llvm::Attribute::ReturnsTwice); + llvm::Value *Buf = Builder.CreateBitOrPointerCast( + EmitScalarExpr(E->getArg(0)), Int8PtrTy); + llvm::CallSite CS; + if (getTarget().getTriple().getArch() == llvm::Triple::x86) { + llvm::Type *ArgTypes[] = {Int8PtrTy, IntTy}; + llvm::Constant *SetJmp3 = CGM.CreateRuntimeFunction( + llvm::FunctionType::get(IntTy, ArgTypes, /*isVarArg=*/true), + "_setjmp3", ReturnsTwiceAttr); + llvm::Value *Count = ConstantInt::get(IntTy, 0); + llvm::Value *Args[] = {Buf, Count}; + CS = EmitRuntimeCallOrInvoke(SetJmp3, Args); + } else { + llvm::Type *ArgTypes[] = {Int8PtrTy, Int8PtrTy}; + llvm::Constant *SetJmp = CGM.CreateRuntimeFunction( + llvm::FunctionType::get(IntTy, ArgTypes, /*isVarArg=*/false), + "_setjmp", ReturnsTwiceAttr); + llvm::Value *FrameAddr = + Builder.CreateCall(CGM.getIntrinsic(Intrinsic::frameaddress), + ConstantInt::get(Int32Ty, 0)); + llvm::Value *Args[] = {Buf, FrameAddr}; + CS = EmitRuntimeCallOrInvoke(SetJmp, Args); + } + CS.setAttributes(ReturnsTwiceAttr); + return RValue::get(CS.getInstruction()); + } + break; + } + + case Builtin::BI__GetExceptionInfo: { + if (llvm::GlobalVariable *GV = + CGM.getCXXABI().getThrowInfo(FD->getParamDecl(0)->getType())) + return RValue::get(llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy)); + break; + } } // If this is an alias for a lib function 
(e.g. __builtin_sin), emit @@ -1764,6 +1866,8 @@ Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID, case llvm::Triple::r600: case llvm::Triple::amdgcn: return EmitR600BuiltinExpr(BuiltinID, E); + case llvm::Triple::systemz: + return EmitSystemZBuiltinExpr(BuiltinID, E); default: return nullptr; } @@ -2534,7 +2638,7 @@ Function *CodeGenFunction::LookupNeonLLVMIntrinsic(unsigned IntrinsicID, // Return type. SmallVector<llvm::Type *, 3> Tys; if (Modifier & AddRetType) { - llvm::Type *Ty = ConvertType(E->getCallReturnType()); + llvm::Type *Ty = ConvertType(E->getCallReturnType(getContext())); if (Modifier & VectorizeRetType) Ty = llvm::VectorType::get( Ty, VectorSize ? VectorSize / Ty->getPrimitiveSizeInBits() : 1); @@ -2812,7 +2916,7 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr( Ops[2] = Builder.CreateBitCast(Ops[2], Ty); // NEON intrinsic puts accumulator first, unlike the LLVM fma. - return Builder.CreateCall3(F, Ops[1], Ops[2], Ops[0]); + return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0]}); } case NEON::BI__builtin_neon_vld1_v: case NEON::BI__builtin_neon_vld1q_v: @@ -2825,7 +2929,7 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr( case NEON::BI__builtin_neon_vld4_v: case NEON::BI__builtin_neon_vld4q_v: { Function *F = CGM.getIntrinsic(LLVMIntrinsic, Ty); - Ops[1] = Builder.CreateCall2(F, Ops[1], Align, NameHint); + Ops[1] = Builder.CreateCall(F, {Ops[1], Align}, NameHint); Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); Ops[0] = Builder.CreateBitCast(Ops[0], Ty); return Builder.CreateStore(Ops[1], Ops[0]); @@ -3004,7 +3108,7 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr( Indices.push_back(Builder.getInt32(i+vi)); Indices.push_back(Builder.getInt32(i+e+vi)); } - Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi); + Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi); SV = llvm::ConstantVector::get(Indices); SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vtrn"); SV = Builder.CreateStore(SV, Addr); @@ -3032,7 +3136,7 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr( for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) Indices.push_back(ConstantInt::get(Int32Ty, 2*i+vi)); - Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi); + Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi); SV = llvm::ConstantVector::get(Indices); SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vuzp"); SV = Builder.CreateStore(SV, Addr); @@ -3052,7 +3156,7 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr( Indices.push_back(ConstantInt::get(Int32Ty, (i + vi*e) >> 1)); Indices.push_back(ConstantInt::get(Int32Ty, ((i + vi*e) >> 1)+e)); } - Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi); + Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi); SV = llvm::ConstantVector::get(Indices); SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vzip"); SV = Builder.CreateStore(SV, Addr); @@ -3185,7 +3289,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID, : InlineAsm::get(FTy, ".inst 0x" + utohexstr(ZExtValue), "", /*SideEffects=*/true); - return Builder.CreateCall(Emit); + return Builder.CreateCall(Emit, {}); } if (BuiltinID == ARM::BI__builtin_arm_dbg) { @@ -3202,7 +3306,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID, Value *Locality = llvm::ConstantInt::get(Int32Ty, 3); Value *F = CGM.getIntrinsic(Intrinsic::prefetch); - return Builder.CreateCall4(F, Address, RW, Locality, IsData); + return Builder.CreateCall(F, {Address, RW, Locality, IsData}); } if 
(BuiltinID == ARM::BI__builtin_arm_rbit) { @@ -3300,7 +3404,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID, Value *Arg0 = Builder.CreateExtractValue(Val, 0); Value *Arg1 = Builder.CreateExtractValue(Val, 1); Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), Int8PtrTy); - return Builder.CreateCall3(F, Arg0, Arg1, StPtr, "strexd"); + return Builder.CreateCall(F, {Arg0, Arg1, StPtr}, "strexd"); } if (BuiltinID == ARM::BI__builtin_arm_strex || @@ -3324,12 +3428,12 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID, ? Intrinsic::arm_stlex : Intrinsic::arm_strex, StoreAddr->getType()); - return Builder.CreateCall2(F, StoreVal, StoreAddr, "strex"); + return Builder.CreateCall(F, {StoreVal, StoreAddr}, "strex"); } if (BuiltinID == ARM::BI__builtin_arm_clrex) { Function *F = CGM.getIntrinsic(Intrinsic::arm_clrex); - return Builder.CreateCall(F); + return Builder.CreateCall(F, {}); } // CRC32 @@ -3365,13 +3469,13 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID, Arg1b = Builder.CreateTruncOrBitCast(Arg1b, Int32Ty); Function *F = CGM.getIntrinsic(CRCIntrinsicID); - Value *Res = Builder.CreateCall2(F, Arg0, Arg1a); - return Builder.CreateCall2(F, Res, Arg1b); + Value *Res = Builder.CreateCall(F, {Arg0, Arg1a}); + return Builder.CreateCall(F, {Res, Arg1b}); } else { Arg1 = Builder.CreateZExtOrBitCast(Arg1, Int32Ty); Function *F = CGM.getIntrinsic(CRCIntrinsicID); - return Builder.CreateCall2(F, Arg0, Arg1); + return Builder.CreateCall(F, {Arg0, Arg1}); } } @@ -3547,7 +3651,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID, // Load the value as a one-element vector. Ty = llvm::VectorType::get(VTy->getElementType(), 1); Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld1, Ty); - Value *Ld = Builder.CreateCall2(F, Ops[0], Align); + Value *Ld = Builder.CreateCall(F, {Ops[0], Align}); // Combine them. SmallVector<Constant*, 2> Indices; Indices.push_back(ConstantInt::get(Int32Ty, 1-Lane)); @@ -3582,7 +3686,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID, default: llvm_unreachable("unknown vld_dup intrinsic?"); } Function *F = CGM.getIntrinsic(Int, Ty); - Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld_dup"); + Ops[1] = Builder.CreateCall(F, {Ops[1], Align}, "vld_dup"); Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); Ops[0] = Builder.CreateBitCast(Ops[0], Ty); return Builder.CreateStore(Ops[1], Ops[0]); @@ -3651,7 +3755,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID, Ops[1] = Builder.CreateBitCast(Ops[1], Ty); Ops[2] = EmitNeonShiftVector(Ops[2], Ty, true); Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts; - Ops[1] = Builder.CreateCall2(CGM.getIntrinsic(Int, Ty), Ops[1], Ops[2]); + Ops[1] = Builder.CreateCall(CGM.getIntrinsic(Int, Ty), {Ops[1], Ops[2]}); return Builder.CreateAdd(Ops[0], Ops[1], "vrsra_n"); case NEON::BI__builtin_neon_vsri_n_v: case NEON::BI__builtin_neon_vsriq_n_v: @@ -3979,7 +4083,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID, // FIXME: We need AArch64 specific LLVM intrinsic if we want to specify // PLDL3STRM or PLDL2STRM. 
Value *F = CGM.getIntrinsic(Intrinsic::prefetch); - return Builder.CreateCall4(F, Address, RW, Locality, IsData); + return Builder.CreateCall(F, {Address, RW, Locality, IsData}); } if (BuiltinID == AArch64::BI__builtin_arm_rbit) { @@ -4074,9 +4178,11 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID, Value *Arg1 = Builder.CreateExtractValue(Val, 1); Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), Int8PtrTy); - return Builder.CreateCall3(F, Arg0, Arg1, StPtr, "stxp"); - } else if (BuiltinID == AArch64::BI__builtin_arm_strex || - BuiltinID == AArch64::BI__builtin_arm_stlex) { + return Builder.CreateCall(F, {Arg0, Arg1, StPtr}, "stxp"); + } + + if (BuiltinID == AArch64::BI__builtin_arm_strex || + BuiltinID == AArch64::BI__builtin_arm_stlex) { Value *StoreVal = EmitScalarExpr(E->getArg(0)); Value *StoreAddr = EmitScalarExpr(E->getArg(1)); @@ -4096,12 +4202,12 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID, ? Intrinsic::aarch64_stlxr : Intrinsic::aarch64_stxr, StoreAddr->getType()); - return Builder.CreateCall2(F, StoreVal, StoreAddr, "stxr"); + return Builder.CreateCall(F, {StoreVal, StoreAddr}, "stxr"); } if (BuiltinID == AArch64::BI__builtin_arm_clrex) { Function *F = CGM.getIntrinsic(Intrinsic::aarch64_clrex); - return Builder.CreateCall(F); + return Builder.CreateCall(F, {}); } // CRC32 @@ -4133,7 +4239,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID, llvm::Type *DataTy = F->getFunctionType()->getParamType(1); Arg1 = Builder.CreateZExtOrBitCast(Arg1, DataTy); - return Builder.CreateCall2(F, Arg0, Arg1); + return Builder.CreateCall(F, {Arg0, Arg1}); } llvm::SmallVector<Value*, 4> Ops; @@ -4248,36 +4354,36 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID, case NEON::BI__builtin_neon_vceqzs_f32: Ops.push_back(EmitScalarExpr(E->getArg(0))); return EmitAArch64CompareBuiltinExpr( - Ops[0], ConvertType(E->getCallReturnType()), ICmpInst::FCMP_OEQ, - ICmpInst::ICMP_EQ, "vceqz"); + Ops[0], ConvertType(E->getCallReturnType(getContext())), + ICmpInst::FCMP_OEQ, ICmpInst::ICMP_EQ, "vceqz"); case NEON::BI__builtin_neon_vcgezd_s64: case NEON::BI__builtin_neon_vcgezd_f64: case NEON::BI__builtin_neon_vcgezs_f32: Ops.push_back(EmitScalarExpr(E->getArg(0))); return EmitAArch64CompareBuiltinExpr( - Ops[0], ConvertType(E->getCallReturnType()), ICmpInst::FCMP_OGE, - ICmpInst::ICMP_SGE, "vcgez"); + Ops[0], ConvertType(E->getCallReturnType(getContext())), + ICmpInst::FCMP_OGE, ICmpInst::ICMP_SGE, "vcgez"); case NEON::BI__builtin_neon_vclezd_s64: case NEON::BI__builtin_neon_vclezd_f64: case NEON::BI__builtin_neon_vclezs_f32: Ops.push_back(EmitScalarExpr(E->getArg(0))); return EmitAArch64CompareBuiltinExpr( - Ops[0], ConvertType(E->getCallReturnType()), ICmpInst::FCMP_OLE, - ICmpInst::ICMP_SLE, "vclez"); + Ops[0], ConvertType(E->getCallReturnType(getContext())), + ICmpInst::FCMP_OLE, ICmpInst::ICMP_SLE, "vclez"); case NEON::BI__builtin_neon_vcgtzd_s64: case NEON::BI__builtin_neon_vcgtzd_f64: case NEON::BI__builtin_neon_vcgtzs_f32: Ops.push_back(EmitScalarExpr(E->getArg(0))); return EmitAArch64CompareBuiltinExpr( - Ops[0], ConvertType(E->getCallReturnType()), ICmpInst::FCMP_OGT, - ICmpInst::ICMP_SGT, "vcgtz"); + Ops[0], ConvertType(E->getCallReturnType(getContext())), + ICmpInst::FCMP_OGT, ICmpInst::ICMP_SGT, "vcgtz"); case NEON::BI__builtin_neon_vcltzd_s64: case NEON::BI__builtin_neon_vcltzd_f64: case NEON::BI__builtin_neon_vcltzs_f32: Ops.push_back(EmitScalarExpr(E->getArg(0))); return 
EmitAArch64CompareBuiltinExpr( - Ops[0], ConvertType(E->getCallReturnType()), ICmpInst::FCMP_OLT, - ICmpInst::ICMP_SLT, "vcltz"); + Ops[0], ConvertType(E->getCallReturnType(getContext())), + ICmpInst::FCMP_OLT, ICmpInst::ICMP_SLT, "vcltz"); case NEON::BI__builtin_neon_vceqzd_u64: { llvm::Type *Ty = llvm::Type::getInt64Ty(getLLVMContext()); @@ -4528,8 +4634,8 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID, : Intrinsic::aarch64_neon_srshl; Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty); Ops.push_back(Builder.CreateNeg(EmitScalarExpr(E->getArg(2)))); - Ops[1] = Builder.CreateCall2(CGM.getIntrinsic(Int, Int64Ty), Ops[1], - Builder.CreateSExt(Ops[2], Int64Ty)); + Ops[1] = Builder.CreateCall(CGM.getIntrinsic(Int, Int64Ty), + {Ops[1], Builder.CreateSExt(Ops[2], Int64Ty)}); return Builder.CreateAdd(Ops[0], Builder.CreateBitCast(Ops[1], Int64Ty)); } case NEON::BI__builtin_neon_vshld_n_s64: @@ -4699,7 +4805,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID, Ops[2] = Builder.CreateBitCast(Ops[2], VTy); Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract"); Value *F = CGM.getIntrinsic(Intrinsic::fma, DoubleTy); - Value *Result = Builder.CreateCall3(F, Ops[1], Ops[2], Ops[0]); + Value *Result = Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0]}); return Builder.CreateBitCast(Result, Ty); } Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty); @@ -4713,7 +4819,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID, cast<ConstantInt>(Ops[3])); Ops[2] = Builder.CreateShuffleVector(Ops[2], Ops[2], SV, "lane"); - return Builder.CreateCall3(F, Ops[2], Ops[1], Ops[0]); + return Builder.CreateCall(F, {Ops[2], Ops[1], Ops[0]}); } case NEON::BI__builtin_neon_vfmaq_laneq_v: { Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty); @@ -4722,17 +4828,17 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID, Ops[2] = Builder.CreateBitCast(Ops[2], Ty); Ops[2] = EmitNeonSplat(Ops[2], cast<ConstantInt>(Ops[3])); - return Builder.CreateCall3(F, Ops[2], Ops[1], Ops[0]); + return Builder.CreateCall(F, {Ops[2], Ops[1], Ops[0]}); } case NEON::BI__builtin_neon_vfmas_lane_f32: case NEON::BI__builtin_neon_vfmas_laneq_f32: case NEON::BI__builtin_neon_vfmad_lane_f64: case NEON::BI__builtin_neon_vfmad_laneq_f64: { Ops.push_back(EmitScalarExpr(E->getArg(3))); - llvm::Type *Ty = ConvertType(E->getCallReturnType()); + llvm::Type *Ty = ConvertType(E->getCallReturnType(getContext())); Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty); Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract"); - return Builder.CreateCall3(F, Ops[1], Ops[2], Ops[0]); + return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0]}); } case NEON::BI__builtin_neon_vfms_v: case NEON::BI__builtin_neon_vfmsq_v: { // Only used for FP types @@ -5667,7 +5773,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID, Indices.push_back(ConstantInt::get(Int32Ty, i+vi)); Indices.push_back(ConstantInt::get(Int32Ty, i+e+vi)); } - Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi); + Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi); SV = llvm::ConstantVector::get(Indices); SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vtrn"); SV = Builder.CreateStore(SV, Addr); @@ -5686,7 +5792,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID, for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) Indices.push_back(ConstantInt::get(Int32Ty, 2*i+vi)); - Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi); + Value *Addr = 
Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi); SV = llvm::ConstantVector::get(Indices); SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vuzp"); SV = Builder.CreateStore(SV, Addr); @@ -5706,7 +5812,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID, Indices.push_back(ConstantInt::get(Int32Ty, (i + vi*e) >> 1)); Indices.push_back(ConstantInt::get(Int32Ty, ((i + vi*e) >> 1)+e)); } - Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi); + Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi); SV = llvm::ConstantVector::get(Indices); SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vzip"); SV = Builder.CreateStore(SV, Addr); @@ -5817,7 +5923,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID, Value *Locality = EmitScalarExpr(E->getArg(1)); Value *Data = ConstantInt::get(Int32Ty, 1); Value *F = CGM.getIntrinsic(Intrinsic::prefetch); - return Builder.CreateCall4(F, Address, RW, Locality, Data); + return Builder.CreateCall(F, {Address, RW, Locality, Data}); } case X86::BI__builtin_ia32_vec_init_v8qi: case X86::BI__builtin_ia32_vec_init_v4hi: @@ -5856,104 +5962,95 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID, Ops[0] = Builder.CreateBitCast(Ops[0], PtrTy); return Builder.CreateStore(Ops[1], Ops[0]); } - case X86::BI__builtin_ia32_palignr: { - unsigned shiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue(); - - // If palignr is shifting the pair of input vectors less than 9 bytes, - // emit a shuffle instruction. - if (shiftVal <= 8) { - SmallVector<llvm::Constant*, 8> Indices; - for (unsigned i = 0; i != 8; ++i) - Indices.push_back(llvm::ConstantInt::get(Int32Ty, shiftVal + i)); - - Value* SV = llvm::ConstantVector::get(Indices); - return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr"); + case X86::BI__builtin_ia32_palignr128: + case X86::BI__builtin_ia32_palignr256: { + unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue(); + + unsigned NumElts = + cast<llvm::VectorType>(Ops[0]->getType())->getNumElements(); + assert(NumElts % 16 == 0); + unsigned NumLanes = NumElts / 16; + unsigned NumLaneElts = NumElts / NumLanes; + + // If palignr is shifting the pair of vectors more than the size of two + // lanes, emit zero. + if (ShiftVal >= (2 * NumLaneElts)) + return llvm::Constant::getNullValue(ConvertType(E->getType())); + + // If palignr is shifting the pair of input vectors more than one lane, + // but less than two lanes, convert to shifting in zeroes. + if (ShiftVal > NumLaneElts) { + ShiftVal -= NumLaneElts; + Ops[0] = llvm::Constant::getNullValue(Ops[0]->getType()); } - // If palignr is shifting the pair of input vectors more than 8 but less - // than 16 bytes, emit a logical right shift of the destination. - if (shiftVal < 16) { - // MMX has these as 1 x i64 vectors for some odd optimization reasons. - llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 1); - - Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast"); - Ops[1] = llvm::ConstantInt::get(VecTy, (shiftVal-8) * 8); - - // create i32 constant - llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_mmx_psrl_q); - return Builder.CreateCall(F, makeArrayRef(Ops.data(), 2), "palignr"); + SmallVector<llvm::Constant*, 32> Indices; + // 256-bit palignr operates on 128-bit lanes so we need to handle that + for (unsigned l = 0; l != NumElts; l += NumLaneElts) { + for (unsigned i = 0; i != NumLaneElts; ++i) { + unsigned Idx = ShiftVal + i; + if (Idx >= NumLaneElts) + Idx += NumElts - NumLaneElts; // End of lane, switch operand. 
+ Indices.push_back(llvm::ConstantInt::get(Int32Ty, Idx + l)); + } } - // If palignr is shifting the pair of vectors more than 16 bytes, emit zero. - return llvm::Constant::getNullValue(ConvertType(E->getType())); + Value* SV = llvm::ConstantVector::get(Indices); + return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr"); } - case X86::BI__builtin_ia32_palignr128: { - unsigned shiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue(); + case X86::BI__builtin_ia32_pslldqi256: { + // Shift value is in bits so divide by 8. + unsigned shiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() >> 3; - // If palignr is shifting the pair of input vectors less than 17 bytes, - // emit a shuffle instruction. - if (shiftVal <= 16) { - SmallVector<llvm::Constant*, 16> Indices; - for (unsigned i = 0; i != 16; ++i) - Indices.push_back(llvm::ConstantInt::get(Int32Ty, shiftVal + i)); + // If pslldq is shifting the vector more than 15 bytes, emit zero. + if (shiftVal >= 16) + return llvm::Constant::getNullValue(ConvertType(E->getType())); - Value* SV = llvm::ConstantVector::get(Indices); - return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr"); + SmallVector<llvm::Constant*, 32> Indices; + // 256-bit pslldq operates on 128-bit lanes so we need to handle that + for (unsigned l = 0; l != 32; l += 16) { + for (unsigned i = 0; i != 16; ++i) { + unsigned Idx = 32 + i - shiftVal; + if (Idx < 32) Idx -= 16; // end of lane, switch operand. + Indices.push_back(llvm::ConstantInt::get(Int32Ty, Idx + l)); + } } - // If palignr is shifting the pair of input vectors more than 16 but less - // than 32 bytes, emit a logical right shift of the destination. - if (shiftVal < 32) { - llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 2); - - Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast"); - Ops[1] = llvm::ConstantInt::get(Int32Ty, (shiftVal-16) * 8); - - // create i32 constant - llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_psrl_dq); - return Builder.CreateCall(F, makeArrayRef(Ops.data(), 2), "palignr"); - } + llvm::Type *VecTy = llvm::VectorType::get(Int8Ty, 32); + Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast"); + Value *Zero = llvm::Constant::getNullValue(VecTy); - // If palignr is shifting the pair of vectors more than 32 bytes, emit zero. - return llvm::Constant::getNullValue(ConvertType(E->getType())); - } - case X86::BI__builtin_ia32_palignr256: { - unsigned shiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue(); - - // If palignr is shifting the pair of input vectors less than 17 bytes, - // emit a shuffle instruction. - if (shiftVal <= 16) { - SmallVector<llvm::Constant*, 32> Indices; - // 256-bit palignr operates on 128-bit lanes so we need to handle that - for (unsigned l = 0; l != 2; ++l) { - unsigned LaneStart = l * 16; - unsigned LaneEnd = (l+1) * 16; - for (unsigned i = 0; i != 16; ++i) { - unsigned Idx = shiftVal + i + LaneStart; - if (Idx >= LaneEnd) Idx += 16; // end of lane, switch operand - Indices.push_back(llvm::ConstantInt::get(Int32Ty, Idx)); - } + Value *SV = llvm::ConstantVector::get(Indices); + SV = Builder.CreateShuffleVector(Zero, Ops[0], SV, "pslldq"); + llvm::Type *ResultType = ConvertType(E->getType()); + return Builder.CreateBitCast(SV, ResultType, "cast"); + } + case X86::BI__builtin_ia32_psrldqi256: { + // Shift value is in bits so divide by 8. + unsigned shiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() >> 3; + + // If psrldq is shifting the vector more than 15 bytes, emit zero. 
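Note: both 256-bit byte-shift handlers (pslldqi256 above, psrldqi256 here) now lower to a single shufflevector against a zero vector instead of a target intrinsic, per lane of 16 bytes. A worked example of the psrldq index math for shiftVal == 2 on the first lane (shuffle indices 32 and up select from the all-zero second operand):

    // Idx = i + 2;  if (Idx >= 16) Idx += 16;
    // i = 0..13 -> source bytes 2..15; i = 14,15 -> 32,33 (zero bytes)
    // so each lane shifts right by two bytes with zeros filled in from the top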
+ if (shiftVal >= 16) + return llvm::Constant::getNullValue(ConvertType(E->getType())); + + SmallVector<llvm::Constant*, 32> Indices; + // 256-bit psrldq operates on 128-bit lanes so we need to handle that + for (unsigned l = 0; l != 32; l += 16) { + for (unsigned i = 0; i != 16; ++i) { + unsigned Idx = i + shiftVal; + if (Idx >= 16) Idx += 16; // end of lane, switch operand. + Indices.push_back(llvm::ConstantInt::get(Int32Ty, Idx + l)); } - - Value* SV = llvm::ConstantVector::get(Indices); - return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr"); } - // If palignr is shifting the pair of input vectors more than 16 but less - // than 32 bytes, emit a logical right shift of the destination. - if (shiftVal < 32) { - llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 4); - - Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast"); - Ops[1] = llvm::ConstantInt::get(Int32Ty, (shiftVal-16) * 8); - - // create i32 constant - llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_avx2_psrl_dq); - return Builder.CreateCall(F, makeArrayRef(Ops.data(), 2), "palignr"); - } + llvm::Type *VecTy = llvm::VectorType::get(Int8Ty, 32); + Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast"); + Value *Zero = llvm::Constant::getNullValue(VecTy); - // If palignr is shifting the pair of vectors more than 32 bytes, emit zero. - return llvm::Constant::getNullValue(ConvertType(E->getType())); + Value *SV = llvm::ConstantVector::get(Indices); + SV = Builder.CreateShuffleVector(Ops[0], Zero, SV, "psrldq"); + llvm::Type *ResultType = ConvertType(E->getType()); + return Builder.CreateBitCast(SV, ResultType, "cast"); } case X86::BI__builtin_ia32_movntps: case X86::BI__builtin_ia32_movntps256: @@ -5987,20 +6084,10 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID, // 3DNow! 
case X86::BI__builtin_ia32_pswapdsf: case X86::BI__builtin_ia32_pswapdsi: { - const char *name; - Intrinsic::ID ID; - switch(BuiltinID) { - default: llvm_unreachable("Unsupported intrinsic!"); - case X86::BI__builtin_ia32_pswapdsf: - case X86::BI__builtin_ia32_pswapdsi: - name = "pswapd"; - ID = Intrinsic::x86_3dnowa_pswapd; - break; - } llvm::Type *MMXTy = llvm::Type::getX86_MMXTy(getLLVMContext()); Ops[0] = Builder.CreateBitCast(Ops[0], MMXTy, "cast"); - llvm::Function *F = CGM.getIntrinsic(ID); - return Builder.CreateCall(F, Ops, name); + llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_3dnowa_pswapd); + return Builder.CreateCall(F, Ops, "pswapd"); } case X86::BI__builtin_ia32_rdrand16_step: case X86::BI__builtin_ia32_rdrand32_step: @@ -6031,17 +6118,10 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID, break; } - Value *Call = Builder.CreateCall(CGM.getIntrinsic(ID)); + Value *Call = Builder.CreateCall(CGM.getIntrinsic(ID), {}); Builder.CreateStore(Builder.CreateExtractValue(Call, 0), Ops[0]); return Builder.CreateExtractValue(Call, 1); } - // AVX2 broadcast - case X86::BI__builtin_ia32_vbroadcastsi256: { - Value *VecTmp = CreateMemTemp(E->getArg(0)->getType()); - Builder.CreateStore(Ops[0], VecTmp); - Value *F = CGM.getIntrinsic(Intrinsic::x86_avx2_vbroadcasti128); - return Builder.CreateCall(F, Builder.CreateBitCast(VecTmp, Int8PtrTy)); - } // SSE comparison intrisics case X86::BI__builtin_ia32_cmpeqps: case X86::BI__builtin_ia32_cmpltps: @@ -6318,7 +6398,7 @@ static Value *emitTernaryFPBuiltin(CodeGenFunction &CGF, llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2)); Value *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType()); - return CGF.Builder.CreateCall3(F, Src0, Src1, Src2); + return CGF.Builder.CreateCall(F, {Src0, Src1, Src2}); } // Emit an intrinsic that has 1 float or double operand, and 1 integer. @@ -6329,7 +6409,7 @@ static Value *emitFPIntBuiltin(CodeGenFunction &CGF, llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1)); Value *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType()); - return CGF.Builder.CreateCall2(F, Src0, Src1); + return CGF.Builder.CreateCall(F, {Src0, Src1}); } Value *CodeGenFunction::EmitR600BuiltinExpr(unsigned BuiltinID, @@ -6350,7 +6430,7 @@ Value *CodeGenFunction::EmitR600BuiltinExpr(unsigned BuiltinID, llvm::Value *Callee = CGM.getIntrinsic(Intrinsic::AMDGPU_div_scale, X->getType()); - llvm::Value *Tmp = Builder.CreateCall3(Callee, X, Y, Z); + llvm::Value *Tmp = Builder.CreateCall(Callee, {X, Y, Z}); llvm::Value *Result = Builder.CreateExtractValue(Tmp, 0); llvm::Value *Flag = Builder.CreateExtractValue(Tmp, 1); @@ -6373,7 +6453,7 @@ Value *CodeGenFunction::EmitR600BuiltinExpr(unsigned BuiltinID, llvm::Value *F = CGM.getIntrinsic(Intrinsic::AMDGPU_div_fmas, Src0->getType()); llvm::Value *Src3ToBool = Builder.CreateIsNotNull(Src3); - return Builder.CreateCall4(F, Src0, Src1, Src2, Src3ToBool); + return Builder.CreateCall(F, {Src0, Src1, Src2, Src3ToBool}); } case R600::BI__builtin_amdgpu_div_fixup: case R600::BI__builtin_amdgpu_div_fixupf: @@ -6400,3 +6480,246 @@ Value *CodeGenFunction::EmitR600BuiltinExpr(unsigned BuiltinID, return nullptr; } } + +/// Handle a SystemZ function in which the final argument is a pointer +/// to an int that receives the post-instruction CC value. At the LLVM level +/// this is represented as a function that returns a {result, cc} pair. 
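Note: several SystemZ builtins take a trailing int * that receives the condition code, while the matching LLVM intrinsics return a {result, cc} pair; this helper bridges the two shapes. Roughly (the builtin and intrinsic names are placeholders):

    // C level:    result = __builtin_s390_xxx(args..., &cc);
    // IR level:   %pair = call { <ty>, i32 } @llvm.s390.xxx(args...)
    //             %res  = extractvalue { <ty>, i32 } %pair, 0
    //             %cc   = extractvalue { <ty>, i32 } %pair, 1
    //             store i32 %cc, i32* %ccptr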
+static Value *EmitSystemZIntrinsicWithCC(CodeGenFunction &CGF, + unsigned IntrinsicID, + const CallExpr *E) { + unsigned NumArgs = E->getNumArgs() - 1; + SmallVector<Value *, 8> Args(NumArgs); + for (unsigned I = 0; I < NumArgs; ++I) + Args[I] = CGF.EmitScalarExpr(E->getArg(I)); + Value *CCPtr = CGF.EmitScalarExpr(E->getArg(NumArgs)); + Value *F = CGF.CGM.getIntrinsic(IntrinsicID); + Value *Call = CGF.Builder.CreateCall(F, Args); + Value *CC = CGF.Builder.CreateExtractValue(Call, 1); + CGF.Builder.CreateStore(CC, CCPtr); + return CGF.Builder.CreateExtractValue(Call, 0); +} + +Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID, + const CallExpr *E) { + switch (BuiltinID) { + case SystemZ::BI__builtin_tbegin: { + Value *TDB = EmitScalarExpr(E->getArg(0)); + Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff0c); + Value *F = CGM.getIntrinsic(Intrinsic::s390_tbegin); + return Builder.CreateCall(F, {TDB, Control}); + } + case SystemZ::BI__builtin_tbegin_nofloat: { + Value *TDB = EmitScalarExpr(E->getArg(0)); + Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff0c); + Value *F = CGM.getIntrinsic(Intrinsic::s390_tbegin_nofloat); + return Builder.CreateCall(F, {TDB, Control}); + } + case SystemZ::BI__builtin_tbeginc: { + Value *TDB = llvm::ConstantPointerNull::get(Int8PtrTy); + Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff08); + Value *F = CGM.getIntrinsic(Intrinsic::s390_tbeginc); + return Builder.CreateCall(F, {TDB, Control}); + } + case SystemZ::BI__builtin_tabort: { + Value *Data = EmitScalarExpr(E->getArg(0)); + Value *F = CGM.getIntrinsic(Intrinsic::s390_tabort); + return Builder.CreateCall(F, Builder.CreateSExt(Data, Int64Ty, "tabort")); + } + case SystemZ::BI__builtin_non_tx_store: { + Value *Address = EmitScalarExpr(E->getArg(0)); + Value *Data = EmitScalarExpr(E->getArg(1)); + Value *F = CGM.getIntrinsic(Intrinsic::s390_ntstg); + return Builder.CreateCall(F, {Data, Address}); + } + + // Vector builtins. Note that most vector builtins are mapped automatically + // to target-specific LLVM intrinsics. The ones handled specially here can + // be represented via standard LLVM IR, which is preferable to enable common + // LLVM optimizations. 
+ + case SystemZ::BI__builtin_s390_vpopctb: + case SystemZ::BI__builtin_s390_vpopcth: + case SystemZ::BI__builtin_s390_vpopctf: + case SystemZ::BI__builtin_s390_vpopctg: { + llvm::Type *ResultType = ConvertType(E->getType()); + Value *X = EmitScalarExpr(E->getArg(0)); + Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType); + return Builder.CreateCall(F, X); + } + + case SystemZ::BI__builtin_s390_vclzb: + case SystemZ::BI__builtin_s390_vclzh: + case SystemZ::BI__builtin_s390_vclzf: + case SystemZ::BI__builtin_s390_vclzg: { + llvm::Type *ResultType = ConvertType(E->getType()); + Value *X = EmitScalarExpr(E->getArg(0)); + Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false); + Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ResultType); + return Builder.CreateCall(F, {X, Undef}); + } + + case SystemZ::BI__builtin_s390_vctzb: + case SystemZ::BI__builtin_s390_vctzh: + case SystemZ::BI__builtin_s390_vctzf: + case SystemZ::BI__builtin_s390_vctzg: { + llvm::Type *ResultType = ConvertType(E->getType()); + Value *X = EmitScalarExpr(E->getArg(0)); + Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false); + Function *F = CGM.getIntrinsic(Intrinsic::cttz, ResultType); + return Builder.CreateCall(F, {X, Undef}); + } + + case SystemZ::BI__builtin_s390_vfsqdb: { + llvm::Type *ResultType = ConvertType(E->getType()); + Value *X = EmitScalarExpr(E->getArg(0)); + Function *F = CGM.getIntrinsic(Intrinsic::sqrt, ResultType); + return Builder.CreateCall(F, X); + } + case SystemZ::BI__builtin_s390_vfmadb: { + llvm::Type *ResultType = ConvertType(E->getType()); + Value *X = EmitScalarExpr(E->getArg(0)); + Value *Y = EmitScalarExpr(E->getArg(1)); + Value *Z = EmitScalarExpr(E->getArg(2)); + Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType); + return Builder.CreateCall(F, {X, Y, Z}); + } + case SystemZ::BI__builtin_s390_vfmsdb: { + llvm::Type *ResultType = ConvertType(E->getType()); + Value *X = EmitScalarExpr(E->getArg(0)); + Value *Y = EmitScalarExpr(E->getArg(1)); + Value *Z = EmitScalarExpr(E->getArg(2)); + Value *Zero = llvm::ConstantFP::getZeroValueForNegation(ResultType); + Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType); + return Builder.CreateCall(F, {X, Y, Builder.CreateFSub(Zero, Z, "sub")}); + } + case SystemZ::BI__builtin_s390_vflpdb: { + llvm::Type *ResultType = ConvertType(E->getType()); + Value *X = EmitScalarExpr(E->getArg(0)); + Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType); + return Builder.CreateCall(F, X); + } + case SystemZ::BI__builtin_s390_vflndb: { + llvm::Type *ResultType = ConvertType(E->getType()); + Value *X = EmitScalarExpr(E->getArg(0)); + Value *Zero = llvm::ConstantFP::getZeroValueForNegation(ResultType); + Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType); + return Builder.CreateFSub(Zero, Builder.CreateCall(F, X), "sub"); + } + case SystemZ::BI__builtin_s390_vfidb: { + llvm::Type *ResultType = ConvertType(E->getType()); + Value *X = EmitScalarExpr(E->getArg(0)); + // Constant-fold the M4 and M5 mask arguments. + llvm::APSInt M4, M5; + bool IsConstM4 = E->getArg(1)->isIntegerConstantExpr(M4, getContext()); + bool IsConstM5 = E->getArg(2)->isIntegerConstantExpr(M5, getContext()); + assert(IsConstM4 && IsConstM5 && "Constant arg isn't actually constant?"); + (void)IsConstM4; (void)IsConstM5; + // Check whether this instance of vfidb can be represented via a LLVM + // standard intrinsic. We only support some combinations of M4 and M5. 
+ Intrinsic::ID ID = Intrinsic::not_intrinsic; + switch (M4.getZExtValue()) { + default: break; + case 0: // IEEE-inexact exception allowed + switch (M5.getZExtValue()) { + default: break; + case 0: ID = Intrinsic::rint; break; + } + break; + case 4: // IEEE-inexact exception suppressed + switch (M5.getZExtValue()) { + default: break; + case 0: ID = Intrinsic::nearbyint; break; + case 1: ID = Intrinsic::round; break; + case 5: ID = Intrinsic::trunc; break; + case 6: ID = Intrinsic::ceil; break; + case 7: ID = Intrinsic::floor; break; + } + break; + } + if (ID != Intrinsic::not_intrinsic) { + Function *F = CGM.getIntrinsic(ID, ResultType); + return Builder.CreateCall(F, X); + } + Function *F = CGM.getIntrinsic(Intrinsic::s390_vfidb); + Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4); + Value *M5Value = llvm::ConstantInt::get(getLLVMContext(), M5); + return Builder.CreateCall(F, {X, M4Value, M5Value}); + } + + // Vector intrisincs that output the post-instruction CC value. + +#define INTRINSIC_WITH_CC(NAME) \ + case SystemZ::BI__builtin_##NAME: \ + return EmitSystemZIntrinsicWithCC(*this, Intrinsic::NAME, E) + + INTRINSIC_WITH_CC(s390_vpkshs); + INTRINSIC_WITH_CC(s390_vpksfs); + INTRINSIC_WITH_CC(s390_vpksgs); + + INTRINSIC_WITH_CC(s390_vpklshs); + INTRINSIC_WITH_CC(s390_vpklsfs); + INTRINSIC_WITH_CC(s390_vpklsgs); + + INTRINSIC_WITH_CC(s390_vceqbs); + INTRINSIC_WITH_CC(s390_vceqhs); + INTRINSIC_WITH_CC(s390_vceqfs); + INTRINSIC_WITH_CC(s390_vceqgs); + + INTRINSIC_WITH_CC(s390_vchbs); + INTRINSIC_WITH_CC(s390_vchhs); + INTRINSIC_WITH_CC(s390_vchfs); + INTRINSIC_WITH_CC(s390_vchgs); + + INTRINSIC_WITH_CC(s390_vchlbs); + INTRINSIC_WITH_CC(s390_vchlhs); + INTRINSIC_WITH_CC(s390_vchlfs); + INTRINSIC_WITH_CC(s390_vchlgs); + + INTRINSIC_WITH_CC(s390_vfaebs); + INTRINSIC_WITH_CC(s390_vfaehs); + INTRINSIC_WITH_CC(s390_vfaefs); + + INTRINSIC_WITH_CC(s390_vfaezbs); + INTRINSIC_WITH_CC(s390_vfaezhs); + INTRINSIC_WITH_CC(s390_vfaezfs); + + INTRINSIC_WITH_CC(s390_vfeebs); + INTRINSIC_WITH_CC(s390_vfeehs); + INTRINSIC_WITH_CC(s390_vfeefs); + + INTRINSIC_WITH_CC(s390_vfeezbs); + INTRINSIC_WITH_CC(s390_vfeezhs); + INTRINSIC_WITH_CC(s390_vfeezfs); + + INTRINSIC_WITH_CC(s390_vfenebs); + INTRINSIC_WITH_CC(s390_vfenehs); + INTRINSIC_WITH_CC(s390_vfenefs); + + INTRINSIC_WITH_CC(s390_vfenezbs); + INTRINSIC_WITH_CC(s390_vfenezhs); + INTRINSIC_WITH_CC(s390_vfenezfs); + + INTRINSIC_WITH_CC(s390_vistrbs); + INTRINSIC_WITH_CC(s390_vistrhs); + INTRINSIC_WITH_CC(s390_vistrfs); + + INTRINSIC_WITH_CC(s390_vstrcbs); + INTRINSIC_WITH_CC(s390_vstrchs); + INTRINSIC_WITH_CC(s390_vstrcfs); + + INTRINSIC_WITH_CC(s390_vstrczbs); + INTRINSIC_WITH_CC(s390_vstrczhs); + INTRINSIC_WITH_CC(s390_vstrczfs); + + INTRINSIC_WITH_CC(s390_vfcedbs); + INTRINSIC_WITH_CC(s390_vfchdbs); + INTRINSIC_WITH_CC(s390_vfchedbs); + + INTRINSIC_WITH_CC(s390_vftcidb); + +#undef INTRINSIC_WITH_CC + + default: + return nullptr; + } +} diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCUDANV.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGCUDANV.cpp index fb11751..67d0ab7 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CGCUDANV.cpp +++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCUDANV.cpp @@ -20,7 +20,6 @@ #include "llvm/IR/CallSite.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DerivedTypes.h" -#include <vector> using namespace clang; using namespace CodeGen; @@ -30,29 +29,66 @@ namespace { class CGNVCUDARuntime : public CGCUDARuntime { private: - llvm::Type *IntTy, *SizeTy; - llvm::PointerType *CharPtrTy, *VoidPtrTy; + llvm::Type *IntTy, 
*SizeTy, *VoidTy; + llvm::PointerType *CharPtrTy, *VoidPtrTy, *VoidPtrPtrTy; + + /// Convenience reference to LLVM Context + llvm::LLVMContext &Context; + /// Convenience reference to the current module + llvm::Module &TheModule; + /// Keeps track of kernel launch stubs emitted in this module + llvm::SmallVector<llvm::Function *, 16> EmittedKernels; + /// Keeps track of variables containing handles of GPU binaries. Populated by + /// ModuleCtorFunction() and used to create corresponding cleanup calls in + /// ModuleDtorFunction() + llvm::SmallVector<llvm::GlobalVariable *, 16> GpuBinaryHandles; llvm::Constant *getSetupArgumentFn() const; llvm::Constant *getLaunchFn() const; + /// Creates a function to register all kernel stubs generated in this module. + llvm::Function *makeRegisterKernelsFn(); + + /// Helper function that generates a constant string and returns a pointer to + /// the start of the string. The result of this function can be used anywhere + /// where the C code specifies const char*. + llvm::Constant *makeConstantString(const std::string &Str, + const std::string &Name = "", + unsigned Alignment = 0) { + llvm::Constant *Zeros[] = {llvm::ConstantInt::get(SizeTy, 0), + llvm::ConstantInt::get(SizeTy, 0)}; + auto *ConstStr = CGM.GetAddrOfConstantCString(Str, Name.c_str()); + return llvm::ConstantExpr::getGetElementPtr(ConstStr->getValueType(), + ConstStr, Zeros); + } + + void emitDeviceStubBody(CodeGenFunction &CGF, FunctionArgList &Args); + public: CGNVCUDARuntime(CodeGenModule &CGM); - void EmitDeviceStubBody(CodeGenFunction &CGF, FunctionArgList &Args) override; + void emitDeviceStub(CodeGenFunction &CGF, FunctionArgList &Args) override; + /// Creates module constructor function + llvm::Function *makeModuleCtorFunction() override; + /// Creates module destructor function + llvm::Function *makeModuleDtorFunction() override; }; } -CGNVCUDARuntime::CGNVCUDARuntime(CodeGenModule &CGM) : CGCUDARuntime(CGM) { +CGNVCUDARuntime::CGNVCUDARuntime(CodeGenModule &CGM) + : CGCUDARuntime(CGM), Context(CGM.getLLVMContext()), + TheModule(CGM.getModule()) { CodeGen::CodeGenTypes &Types = CGM.getTypes(); ASTContext &Ctx = CGM.getContext(); IntTy = Types.ConvertType(Ctx.IntTy); SizeTy = Types.ConvertType(Ctx.getSizeType()); + VoidTy = llvm::Type::getVoidTy(Context); CharPtrTy = llvm::PointerType::getUnqual(Types.ConvertType(Ctx.CharTy)); VoidPtrTy = cast<llvm::PointerType>(Types.ConvertType(Ctx.VoidPtrTy)); + VoidPtrPtrTy = VoidPtrTy->getPointerTo(); } llvm::Constant *CGNVCUDARuntime::getSetupArgumentFn() const { @@ -68,14 +104,17 @@ llvm::Constant *CGNVCUDARuntime::getSetupArgumentFn() const { llvm::Constant *CGNVCUDARuntime::getLaunchFn() const { // cudaError_t cudaLaunch(char *) - std::vector<llvm::Type*> Params; - Params.push_back(CharPtrTy); - return CGM.CreateRuntimeFunction(llvm::FunctionType::get(IntTy, - Params, false), - "cudaLaunch"); + return CGM.CreateRuntimeFunction( + llvm::FunctionType::get(IntTy, CharPtrTy, false), "cudaLaunch"); +} + +void CGNVCUDARuntime::emitDeviceStub(CodeGenFunction &CGF, + FunctionArgList &Args) { + EmittedKernels.push_back(CGF.CurFn); + emitDeviceStubBody(CGF, Args); } -void CGNVCUDARuntime::EmitDeviceStubBody(CodeGenFunction &CGF, +void CGNVCUDARuntime::emitDeviceStubBody(CodeGenFunction &CGF, FunctionArgList &Args) { // Build the argument value list and the argument stack struct type. 
SmallVector<llvm::Value *, 16> ArgValues; @@ -87,8 +126,7 @@ void CGNVCUDARuntime::EmitDeviceStubBody(CodeGenFunction &CGF, assert(isa<llvm::PointerType>(V->getType()) && "Arg type not PointerType"); ArgTypes.push_back(cast<llvm::PointerType>(V->getType())->getElementType()); } - llvm::StructType *ArgStackTy = llvm::StructType::get( - CGF.getLLVMContext(), ArgTypes); + llvm::StructType *ArgStackTy = llvm::StructType::get(Context, ArgTypes); llvm::BasicBlock *EndBlock = CGF.createBasicBlock("setup.end"); @@ -120,6 +158,160 @@ void CGNVCUDARuntime::EmitDeviceStubBody(CodeGenFunction &CGF, CGF.EmitBlock(EndBlock); } +/// Creates internal function to register all kernel stubs generated in this +/// module with the CUDA runtime. +/// \code +/// void __cuda_register_kernels(void** GpuBinaryHandle) { +/// __cudaRegisterFunction(GpuBinaryHandle,Kernel0,...); +/// ... +/// __cudaRegisterFunction(GpuBinaryHandle,KernelM,...); +/// } +/// \endcode +llvm::Function *CGNVCUDARuntime::makeRegisterKernelsFn() { + llvm::Function *RegisterKernelsFunc = llvm::Function::Create( + llvm::FunctionType::get(VoidTy, VoidPtrPtrTy, false), + llvm::GlobalValue::InternalLinkage, "__cuda_register_kernels", &TheModule); + llvm::BasicBlock *EntryBB = + llvm::BasicBlock::Create(Context, "entry", RegisterKernelsFunc); + CGBuilderTy Builder(Context); + Builder.SetInsertPoint(EntryBB); + + // void __cudaRegisterFunction(void **, const char *, char *, const char *, + // int, uint3*, uint3*, dim3*, dim3*, int*) + std::vector<llvm::Type *> RegisterFuncParams = { + VoidPtrPtrTy, CharPtrTy, CharPtrTy, CharPtrTy, IntTy, + VoidPtrTy, VoidPtrTy, VoidPtrTy, VoidPtrTy, IntTy->getPointerTo()}; + llvm::Constant *RegisterFunc = CGM.CreateRuntimeFunction( + llvm::FunctionType::get(IntTy, RegisterFuncParams, false), + "__cudaRegisterFunction"); + + // Extract GpuBinaryHandle passed as the first argument passed to + // __cuda_register_kernels() and generate __cudaRegisterFunction() call for + // each emitted kernel. + llvm::Argument &GpuBinaryHandlePtr = *RegisterKernelsFunc->arg_begin(); + for (llvm::Function *Kernel : EmittedKernels) { + llvm::Constant *KernelName = makeConstantString(Kernel->getName()); + llvm::Constant *NullPtr = llvm::ConstantPointerNull::get(VoidPtrTy); + llvm::Value *args[] = { + &GpuBinaryHandlePtr, Builder.CreateBitCast(Kernel, VoidPtrTy), + KernelName, KernelName, llvm::ConstantInt::get(IntTy, -1), NullPtr, + NullPtr, NullPtr, NullPtr, + llvm::ConstantPointerNull::get(IntTy->getPointerTo())}; + Builder.CreateCall(RegisterFunc, args); + } + + Builder.CreateRetVoid(); + return RegisterKernelsFunc; +} + +/// Creates a global constructor function for the module: +/// \code +/// void __cuda_module_ctor(void*) { +/// Handle0 = __cudaRegisterFatBinary(GpuBinaryBlob0); +/// __cuda_register_kernels(Handle0); +/// ... 
+/// HandleN = __cudaRegisterFatBinary(GpuBinaryBlobN); +/// __cuda_register_kernels(HandleN); +/// } +/// \endcode +llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() { + // void __cuda_register_kernels(void* handle); + llvm::Function *RegisterKernelsFunc = makeRegisterKernelsFn(); + // void ** __cudaRegisterFatBinary(void *); + llvm::Constant *RegisterFatbinFunc = CGM.CreateRuntimeFunction( + llvm::FunctionType::get(VoidPtrPtrTy, VoidPtrTy, false), + "__cudaRegisterFatBinary"); + // struct { int magic, int version, void * gpu_binary, void * dont_care }; + llvm::StructType *FatbinWrapperTy = + llvm::StructType::get(IntTy, IntTy, VoidPtrTy, VoidPtrTy, nullptr); + + llvm::Function *ModuleCtorFunc = llvm::Function::Create( + llvm::FunctionType::get(VoidTy, VoidPtrTy, false), + llvm::GlobalValue::InternalLinkage, "__cuda_module_ctor", &TheModule); + llvm::BasicBlock *CtorEntryBB = + llvm::BasicBlock::Create(Context, "entry", ModuleCtorFunc); + CGBuilderTy CtorBuilder(Context); + + CtorBuilder.SetInsertPoint(CtorEntryBB); + + // For each GPU binary, register it with the CUDA runtime and store returned + // handle in a global variable and save the handle in GpuBinaryHandles vector + // to be cleaned up in destructor on exit. Then associate all known kernels + // with the GPU binary handle so CUDA runtime can figure out what to call on + // the GPU side. + for (const std::string &GpuBinaryFileName : + CGM.getCodeGenOpts().CudaGpuBinaryFileNames) { + llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> GpuBinaryOrErr = + llvm::MemoryBuffer::getFileOrSTDIN(GpuBinaryFileName); + if (std::error_code EC = GpuBinaryOrErr.getError()) { + CGM.getDiags().Report(diag::err_cannot_open_file) << GpuBinaryFileName + << EC.message(); + continue; + } + + // Create initialized wrapper structure that points to the loaded GPU binary + llvm::Constant *Values[] = { + llvm::ConstantInt::get(IntTy, 0x466243b1), // Fatbin wrapper magic. + llvm::ConstantInt::get(IntTy, 1), // Fatbin version. + makeConstantString(GpuBinaryOrErr.get()->getBuffer(), "", 16), // Data. + llvm::ConstantPointerNull::get(VoidPtrTy)}; // Unused in fatbin v1. + llvm::GlobalVariable *FatbinWrapper = new llvm::GlobalVariable( + TheModule, FatbinWrapperTy, true, llvm::GlobalValue::InternalLinkage, + llvm::ConstantStruct::get(FatbinWrapperTy, Values), + "__cuda_fatbin_wrapper"); + + // GpuBinaryHandle = __cudaRegisterFatBinary(&FatbinWrapper); + llvm::CallInst *RegisterFatbinCall = CtorBuilder.CreateCall( + RegisterFatbinFunc, + CtorBuilder.CreateBitCast(FatbinWrapper, VoidPtrTy)); + llvm::GlobalVariable *GpuBinaryHandle = new llvm::GlobalVariable( + TheModule, VoidPtrPtrTy, false, llvm::GlobalValue::InternalLinkage, + llvm::ConstantPointerNull::get(VoidPtrPtrTy), "__cuda_gpubin_handle"); + CtorBuilder.CreateStore(RegisterFatbinCall, GpuBinaryHandle, false); + + // Call __cuda_register_kernels(GpuBinaryHandle); + CtorBuilder.CreateCall(RegisterKernelsFunc, RegisterFatbinCall); + + // Save GpuBinaryHandle so we can unregister it in destructor. + GpuBinaryHandles.push_back(GpuBinaryHandle); + } + + CtorBuilder.CreateRetVoid(); + return ModuleCtorFunc; +} + +/// Creates a global destructor function that unregisters all GPU code blobs +/// registered by constructor. +/// \code +/// void __cuda_module_dtor(void*) { +/// __cudaUnregisterFatBinary(Handle0); +/// ... 
+/// __cudaUnregisterFatBinary(HandleN); +/// } +/// \endcode +llvm::Function *CGNVCUDARuntime::makeModuleDtorFunction() { + // void __cudaUnregisterFatBinary(void ** handle); + llvm::Constant *UnregisterFatbinFunc = CGM.CreateRuntimeFunction( + llvm::FunctionType::get(VoidTy, VoidPtrPtrTy, false), + "__cudaUnregisterFatBinary"); + + llvm::Function *ModuleDtorFunc = llvm::Function::Create( + llvm::FunctionType::get(VoidTy, VoidPtrTy, false), + llvm::GlobalValue::InternalLinkage, "__cuda_module_dtor", &TheModule); + llvm::BasicBlock *DtorEntryBB = + llvm::BasicBlock::Create(Context, "entry", ModuleDtorFunc); + CGBuilderTy DtorBuilder(Context); + DtorBuilder.SetInsertPoint(DtorEntryBB); + + for (llvm::GlobalVariable *GpuBinaryHandle : GpuBinaryHandles) { + DtorBuilder.CreateCall(UnregisterFatbinFunc, + DtorBuilder.CreateLoad(GpuBinaryHandle, false)); + } + + DtorBuilder.CreateRetVoid(); + return ModuleDtorFunc; +} + CGCUDARuntime *CodeGen::CreateNVCUDARuntime(CodeGenModule &CGM) { return new CGNVCUDARuntime(CGM); } diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCUDARuntime.h b/contrib/llvm/tools/clang/lib/CodeGen/CGCUDARuntime.h index 8c162fb..dcacf97 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CGCUDARuntime.h +++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCUDARuntime.h @@ -16,6 +16,10 @@ #ifndef LLVM_CLANG_LIB_CODEGEN_CGCUDARUNTIME_H #define LLVM_CLANG_LIB_CODEGEN_CGCUDARUNTIME_H +namespace llvm { +class Function; +} + namespace clang { class CUDAKernelCallExpr; @@ -39,10 +43,17 @@ public: virtual RValue EmitCUDAKernelCallExpr(CodeGenFunction &CGF, const CUDAKernelCallExpr *E, ReturnValueSlot ReturnValue); - - virtual void EmitDeviceStubBody(CodeGenFunction &CGF, - FunctionArgList &Args) = 0; + /// Emits a kernel launch stub. + virtual void emitDeviceStub(CodeGenFunction &CGF, FunctionArgList &Args) = 0; + + /// Constructs and returns a module initialization function or nullptr if it's + /// not needed. Must be called after all kernels have been emitted. + virtual llvm::Function *makeModuleCtorFunction() = 0; + + /// Returns a module cleanup function or nullptr if it's not needed. + /// Must be called after ModuleCtorFunction + virtual llvm::Function *makeModuleDtorFunction() = 0; }; /// Creates an instance of a CUDA runtime class. diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCXX.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGCXX.cpp index 9f0e67e..29a199d 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CGCXX.cpp +++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCXX.cpp @@ -182,8 +182,8 @@ bool CodeGenModule::TryEmitDefinitionAsAlias(GlobalDecl AliasDecl, return true; // Create the alias with no name. - auto *Alias = llvm::GlobalAlias::create(AliasType->getElementType(), 0, - Linkage, "", Aliasee, &getModule()); + auto *Alias = + llvm::GlobalAlias::create(AliasType, Linkage, "", Aliasee, &getModule()); // Switch any previous uses to the alias. 
if (Entry) { @@ -231,8 +231,7 @@ llvm::GlobalValue *CodeGenModule::getAddrOfCXXStructor( if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) { GD = GlobalDecl(CD, toCXXCtorType(Type)); } else { - auto *DD = dyn_cast<CXXDestructorDecl>(MD); - GD = GlobalDecl(DD, toCXXDtorType(Type)); + GD = GlobalDecl(cast<CXXDestructorDecl>(MD), toCXXDtorType(Type)); } StringRef Name = getMangledName(GD); diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.cpp index d31331d..cb7e6df 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.cpp +++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.cpp @@ -302,3 +302,10 @@ CGCXXABI::EmitCtorCompleteObjectHandler(CodeGenFunction &CGF, bool CGCXXABI::NeedsVTTParameter(GlobalDecl GD) { return false; } + +llvm::CallInst * +CGCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF, + llvm::Value *Exn) { + // Just call std::terminate and ignore the violating exception. + return CGF.EmitNounwindRuntimeCall(CGF.CGM.getTerminateFn()); +} diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.h b/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.h index cc5c1b2..2c73921 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.h +++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.h @@ -22,6 +22,7 @@ namespace llvm { class Constant; class Type; class Value; +class CallInst; } namespace clang { @@ -214,8 +215,18 @@ public: llvm::Value *Ptr, QualType ElementType, const CXXDestructorDecl *Dtor) = 0; virtual void emitRethrow(CodeGenFunction &CGF, bool isNoReturn) = 0; + virtual void emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) = 0; + virtual llvm::GlobalVariable *getThrowInfo(QualType T) { return nullptr; } + + virtual void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) = 0; + + virtual llvm::CallInst * + emitTerminateForUnexpectedException(CodeGenFunction &CGF, + llvm::Value *Exn); virtual llvm::Constant *getAddrOfRTTIDescriptor(QualType Ty) = 0; + virtual llvm::Constant * + getAddrOfCXXCatchHandlerType(QualType Ty, QualType CatchHandlerType) = 0; virtual bool shouldTypeidBeNullChecked(bool IsDeref, QualType SrcRecordTy) = 0; diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp index 6403fa9..f234053 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp +++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp @@ -30,7 +30,9 @@ #include "llvm/IR/DataLayout.h" #include "llvm/IR/InlineAsm.h" #include "llvm/IR/Intrinsics.h" +#include "llvm/IR/IntrinsicInst.h" #include "llvm/Transforms/Utils/Local.h" +#include <sstream> using namespace clang; using namespace CodeGen; @@ -51,6 +53,8 @@ static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) { case CC_X86Pascal: return llvm::CallingConv::C; // TODO: Add support for __vectorcall to LLVM. case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall; + case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC; + case CC_SpirKernel: return llvm::CallingConv::SPIR_KERNEL; } } @@ -96,8 +100,7 @@ arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod, CanQual<FunctionProtoType> FTP) { RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, prefix.size()); // FIXME: Kill copy. 
- for (unsigned i = 0, e = FTP->getNumParams(); i != e; ++i) - prefix.push_back(FTP->getParamType(i)); + prefix.append(FTP->param_type_begin(), FTP->param_type_end()); CanQualType resultType = FTP->getReturnType().getUnqualifiedType(); return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod, /*chainCall=*/false, prefix, @@ -133,9 +136,6 @@ static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) { if (PcsAttr *PCS = D->getAttr<PcsAttr>()) return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP); - if (D->hasAttr<PnaclCallAttr>()) - return CC_PnaclCall; - if (D->hasAttr<IntelOclBiccAttr>()) return CC_IntelOclBicc; @@ -208,8 +208,7 @@ CodeGenTypes::arrangeCXXStructorDeclaration(const CXXMethodDecl *MD, CanQual<FunctionProtoType> FTP = GetFormalType(MD); // Add the formal parameters. - for (unsigned i = 0, e = FTP->getNumParams(); i != e; ++i) - argTypes.push_back(FTP->getParamType(i)); + argTypes.append(FTP->param_type_begin(), FTP->param_type_end()); TheCXXABI.buildStructorSignature(MD, Type, argTypes); @@ -349,6 +348,26 @@ CodeGenTypes::arrangeMSMemberPointerThunk(const CXXMethodDecl *MD) { FTP->getExtInfo(), RequiredArgs(1)); } +const CGFunctionInfo & +CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD, + CXXCtorType CT) { + assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure); + + CanQual<FunctionProtoType> FTP = GetFormalType(CD); + SmallVector<CanQualType, 2> ArgTys; + const CXXRecordDecl *RD = CD->getParent(); + ArgTys.push_back(GetThisType(Context, RD)); + if (CT == Ctor_CopyingClosure) + ArgTys.push_back(*FTP->param_type_begin()); + if (RD->getNumVBases() > 0) + ArgTys.push_back(Context.IntTy); + CallingConv CC = Context.getDefaultCallingConvention( + /*IsVariadic=*/false, /*IsCXXMethod=*/true); + return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true, + /*chainCall=*/false, ArgTys, + FunctionType::ExtInfo(CC), RequiredArgs::All); +} + /// Arrange a call as unto a free function, except possibly with an /// additional number of formal parameters considered required. 
static const CGFunctionInfo & @@ -716,7 +735,8 @@ void CodeGenFunction::ExpandTypeFromArgs( auto Exp = getTypeExpansion(Ty, getContext()); if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) { for (int i = 0, n = CAExp->NumElts; i < n; i++) { - llvm::Value *EltAddr = Builder.CreateConstGEP2_32(LV.getAddress(), 0, i); + llvm::Value *EltAddr = + Builder.CreateConstGEP2_32(nullptr, LV.getAddress(), 0, i); LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy); ExpandTypeFromArgs(CAExp->EltTy, LV, AI); } @@ -738,10 +758,12 @@ void CodeGenFunction::ExpandTypeFromArgs( ExpandTypeFromArgs(FD->getType(), SubLV, AI); } } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) { - llvm::Value *RealAddr = Builder.CreateStructGEP(LV.getAddress(), 0, "real"); + llvm::Value *RealAddr = + Builder.CreateStructGEP(nullptr, LV.getAddress(), 0, "real"); EmitStoreThroughLValue(RValue::get(*AI++), MakeAddrLValue(RealAddr, CExp->EltTy)); - llvm::Value *ImagAddr = Builder.CreateStructGEP(LV.getAddress(), 1, "imag"); + llvm::Value *ImagAddr = + Builder.CreateStructGEP(nullptr, LV.getAddress(), 1, "imag"); EmitStoreThroughLValue(RValue::get(*AI++), MakeAddrLValue(ImagAddr, CExp->EltTy)); } else { @@ -757,7 +779,7 @@ void CodeGenFunction::ExpandTypeToArgs( if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) { llvm::Value *Addr = RV.getAggregateAddr(); for (int i = 0, n = CAExp->NumElts; i < n; i++) { - llvm::Value *EltAddr = Builder.CreateConstGEP2_32(Addr, 0, i); + llvm::Value *EltAddr = Builder.CreateConstGEP2_32(nullptr, Addr, 0, i); RValue EltRV = convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation()); ExpandTypeToArgs(CAExp->EltTy, EltRV, IRFuncTy, IRCallArgs, IRCallArgPos); @@ -825,7 +847,7 @@ EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr, return SrcPtr; // GEP into the first element. - SrcPtr = CGF.Builder.CreateConstGEP2_32(SrcPtr, 0, 0, "coerce.dive"); + SrcPtr = CGF.Builder.CreateConstGEP2_32(SrcSTy, SrcPtr, 0, 0, "coerce.dive"); // If the first element is a struct, recurse. llvm::Type *SrcTy = @@ -963,7 +985,7 @@ static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val, if (llvm::StructType *STy = dyn_cast<llvm::StructType>(Val->getType())) { for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { - llvm::Value *EltPtr = CGF.Builder.CreateConstGEP2_32(DestPtr, 0, i); + llvm::Value *EltPtr = CGF.Builder.CreateConstGEP2_32(STy, DestPtr, 0, i); llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i); llvm::StoreInst *SI = CGF.Builder.CreateStore(Elt, EltPtr, DestIsVolatile); @@ -1403,7 +1425,7 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI, FuncAttrs.addAttribute(llvm::Attribute::ReadOnly); FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); } - if (TargetDecl->hasAttr<MallocAttr>()) + if (TargetDecl->hasAttr<RestrictAttr>()) RetAttrs.addAttribute(llvm::Attribute::NoAlias); if (TargetDecl->hasAttr<ReturnsNonNullAttr>()) RetAttrs.addAttribute(llvm::Attribute::NonNull); @@ -1458,6 +1480,26 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI, if (!CodeGenOpts.StackRealignment) FuncAttrs.addAttribute("no-realign-stack"); + + // Add target-cpu and target-features work if they differ from the defaults. + std::string &CPU = getTarget().getTargetOpts().CPU; + if (CPU != "") + FuncAttrs.addAttribute("target-cpu", CPU); + + // TODO: Features gets us the features on the command line including + // feature dependencies. 
For canonicalization purposes we might want to + // avoid putting features in the target-features set if we know it'll be one + // of the default features in the backend, e.g. corei7-avx and +avx or figure + // out non-explicit dependencies. + std::vector<std::string> &Features = getTarget().getTargetOpts().Features; + if (!Features.empty()) { + std::stringstream S; + std::copy(Features.begin(), Features.end(), + std::ostream_iterator<std::string>(S, ",")); + // The drop_back gets rid of the trailing space. + FuncAttrs.addAttribute("target-features", + StringRef(S.str()).drop_back(1)); + } } ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI); @@ -1546,8 +1588,12 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI, case ABIArgInfo::Extend: if (ParamType->isSignedIntegerOrEnumerationType()) Attrs.addAttribute(llvm::Attribute::SExt); - else if (ParamType->isUnsignedIntegerOrEnumerationType()) - Attrs.addAttribute(llvm::Attribute::ZExt); + else if (ParamType->isUnsignedIntegerOrEnumerationType()) { + if (getTypes().getABIInfo().shouldSignExtUnsignedType(ParamType)) + Attrs.addAttribute(llvm::Attribute::SExt); + else + Attrs.addAttribute(llvm::Attribute::ZExt); + } // FALL THROUGH case ABIArgInfo::Direct: if (ArgNo == 0 && FI.isChainCall()) @@ -1734,8 +1780,9 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI, switch (ArgI.getKind()) { case ABIArgInfo::InAlloca: { assert(NumIRArgs == 0); - llvm::Value *V = Builder.CreateStructGEP( - ArgStruct, ArgI.getInAllocaFieldIndex(), Arg->getName()); + llvm::Value *V = + Builder.CreateStructGEP(FI.getArgStruct(), ArgStruct, + ArgI.getInAllocaFieldIndex(), Arg->getName()); ArgVals.push_back(ValueAndIsPtr(V, HavePointer)); break; } @@ -1770,8 +1817,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI, ArgVals.push_back(ValueAndIsPtr(V, HavePointer)); } else { // Load scalar value from indirect argument. - CharUnits Alignment = getContext().getTypeAlignInChars(Ty); - V = EmitLoadOfScalar(V, false, Alignment.getQuantity(), Ty, + V = EmitLoadOfScalar(V, false, ArgI.getIndirectAlign(), Ty, Arg->getLocStart()); if (isPromoted) @@ -1901,7 +1947,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI, // If the value is offset in memory, apply the offset now. 
if (unsigned Offs = ArgI.getDirectOffset()) { Ptr = Builder.CreateBitCast(Ptr, Builder.getInt8PtrTy()); - Ptr = Builder.CreateConstGEP1_32(Ptr, Offs); + Ptr = Builder.CreateConstGEP1_32(Builder.getInt8Ty(), Ptr, Offs); Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(ArgI.getCoerceToType())); } @@ -1923,7 +1969,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI, for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { auto AI = FnArgs[FirstIRArg + i]; AI->setName(Arg->getName() + ".coerce" + Twine(i)); - llvm::Value *EltPtr = Builder.CreateConstGEP2_32(Ptr, 0, i); + llvm::Value *EltPtr = Builder.CreateConstGEP2_32(STy, Ptr, 0, i); Builder.CreateStore(AI, EltPtr); } } else { @@ -1936,7 +1982,8 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI, for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { auto AI = FnArgs[FirstIRArg + i]; AI->setName(Arg->getName() + ".coerce" + Twine(i)); - llvm::Value *EltPtr = Builder.CreateConstGEP2_32(TempV, 0, i); + llvm::Value *EltPtr = + Builder.CreateConstGEP2_32(ArgI.getCoerceToType(), TempV, 0, i); Builder.CreateStore(AI, EltPtr); } @@ -2173,7 +2220,29 @@ static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) { if (!CGF.ReturnValue->hasOneUse()) { llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock(); if (IP->empty()) return nullptr; - llvm::StoreInst *store = dyn_cast<llvm::StoreInst>(&IP->back()); + llvm::Instruction *I = &IP->back(); + + // Skip lifetime markers + for (llvm::BasicBlock::reverse_iterator II = IP->rbegin(), + IE = IP->rend(); + II != IE; ++II) { + if (llvm::IntrinsicInst *Intrinsic = + dyn_cast<llvm::IntrinsicInst>(&*II)) { + if (Intrinsic->getIntrinsicID() == llvm::Intrinsic::lifetime_end) { + const llvm::Value *CastAddr = Intrinsic->getArgOperand(1); + ++II; + if (isa<llvm::BitCastInst>(&*II)) { + if (CastAddr == &*II) { + continue; + } + } + } + } + I = &*II; + break; + } + + llvm::StoreInst *store = dyn_cast<llvm::StoreInst>(I); if (!store) return nullptr; if (store->getPointerOperand() != CGF.ReturnValue) return nullptr; assert(!store->isAtomic() && !store->isVolatile()); // see below @@ -2231,8 +2300,8 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI, llvm::Function::arg_iterator EI = CurFn->arg_end(); --EI; llvm::Value *ArgStruct = EI; - llvm::Value *SRet = - Builder.CreateStructGEP(ArgStruct, RetAI.getInAllocaFieldIndex()); + llvm::Value *SRet = Builder.CreateStructGEP( + nullptr, ArgStruct, RetAI.getInAllocaFieldIndex()); RV = Builder.CreateLoad(SRet, "sret"); } break; @@ -2271,7 +2340,8 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI, // If there is a dominating store to ReturnValue, we can elide // the load, zap the store, and usually zap the alloca. - if (llvm::StoreInst *SI = findDominatingStoreToReturnValue(*this)) { + if (llvm::StoreInst *SI = + findDominatingStoreToReturnValue(*this)) { // Reuse the debug location from the store unless there is // cleanup code to be emitted between the store and return // instruction. @@ -2296,7 +2366,7 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI, // If the value is offset in memory, apply the offset now. 
if (unsigned Offs = RetAI.getDirectOffset()) { V = Builder.CreateBitCast(V, Builder.getInt8PtrTy()); - V = Builder.CreateConstGEP1_32(V, Offs); + V = Builder.CreateConstGEP1_32(Builder.getInt8Ty(), V, Offs); V = Builder.CreateBitCast(V, llvm::PointerType::getUnqual(RetAI.getCoerceToType())); } @@ -2342,8 +2412,8 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI, Ret = Builder.CreateRetVoid(); } - if (!RetDbgLoc.isUnknown()) - Ret->setDebugLoc(RetDbgLoc); + if (RetDbgLoc) + Ret->setDebugLoc(std::move(RetDbgLoc)); } static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) { @@ -2626,7 +2696,7 @@ void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) { // Save the stack. llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave); - StackBase = CGF.Builder.CreateCall(F, "inalloca.save"); + StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save"); // Control gets really tied up in landing pads, so we have to spill the // stacksave to an alloca to avoid violating SSA form. @@ -2678,12 +2748,7 @@ void CodeGenFunction::EmitCallArgs(CallArgList &Args, CallExpr::const_arg_iterator ArgBeg, CallExpr::const_arg_iterator ArgEnd, const FunctionDecl *CalleeDecl, - unsigned ParamsToSkip, - bool ForceColumnInfo) { - CGDebugInfo *DI = getDebugInfo(); - SourceLocation CallLoc; - if (DI) CallLoc = DI->getLocation(); - + unsigned ParamsToSkip) { // We *have* to evaluate arguments from right to left in the MS C++ ABI, // because arguments are destroyed left to right in the callee. if (CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) { @@ -2704,8 +2769,6 @@ void CodeGenFunction::EmitCallArgs(CallArgList &Args, EmitCallArg(Args, *Arg, ArgTypes[I]); emitNonNullArgCheck(*this, Args.back().RV, ArgTypes[I], Arg->getExprLoc(), CalleeDecl, ParamsToSkip + I); - // Restore the debug location. - if (DI) DI->EmitLocation(Builder, CallLoc, ForceColumnInfo); } // Un-reverse the arguments we just evaluated so they match up with the LLVM @@ -2720,8 +2783,6 @@ void CodeGenFunction::EmitCallArgs(CallArgList &Args, EmitCallArg(Args, *Arg, ArgTypes[I]); emitNonNullArgCheck(*this, Args.back().RV, ArgTypes[I], Arg->getExprLoc(), CalleeDecl, ParamsToSkip + I); - // Restore the debug location. 
- if (DI) DI->EmitLocation(Builder, CallLoc, ForceColumnInfo); } } @@ -2744,8 +2805,22 @@ struct DestroyUnpassedArg : EHScopeStack::Cleanup { } +struct DisableDebugLocationUpdates { + CodeGenFunction &CGF; + bool disabledDebugInfo; + DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) { + if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo())) + CGF.disableDebugInfo(); + } + ~DisableDebugLocationUpdates() { + if (disabledDebugInfo) + CGF.enableDebugInfo(); + } +}; + void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E, QualType type) { + DisableDebugLocationUpdates Dis(*this, E); if (const ObjCIndirectCopyRestoreExpr *CRE = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) { assert(getLangOpts().ObjCAutoRefCount); @@ -2900,7 +2975,6 @@ void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee, call->setCallingConv(getRuntimeCC()); Builder.CreateUnreachable(); } - PGO.setCurrentRegionUnreachable(); } /// Emits a call or invoke instruction to the given nullary runtime @@ -2949,7 +3023,7 @@ CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee, if (CGM.getLangOpts().ObjCAutoRefCount) AddObjCARCExceptionMetadata(Inst); - return Inst; + return llvm::CallSite(Inst); } /// \brief Store a non-aggregate value to an address to initialize it. For @@ -2986,7 +3060,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, // If we're using inalloca, insert the allocation after the stack save. // FIXME: Do this earlier rather than hacking it in here! - llvm::Value *ArgMemory = nullptr; + llvm::AllocaInst *ArgMemory = nullptr; if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) { llvm::Instruction *IP = CallArgs.getStackBase(); llvm::AllocaInst *AI; @@ -3015,7 +3089,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr; } else { llvm::Value *Addr = - Builder.CreateStructGEP(ArgMemory, RetAI.getInAllocaFieldIndex()); + Builder.CreateStructGEP(ArgMemory->getAllocatedType(), ArgMemory, + RetAI.getInAllocaFieldIndex()); Builder.CreateStore(SRetPtr, Addr); } } @@ -3049,14 +3124,16 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, cast<llvm::Instruction>(RV.getAggregateAddr()); CGBuilderTy::InsertPoint IP = Builder.saveIP(); Builder.SetInsertPoint(Placeholder); - llvm::Value *Addr = Builder.CreateStructGEP( - ArgMemory, ArgInfo.getInAllocaFieldIndex()); + llvm::Value *Addr = + Builder.CreateStructGEP(ArgMemory->getAllocatedType(), ArgMemory, + ArgInfo.getInAllocaFieldIndex()); Builder.restoreIP(IP); deferPlaceholderReplacement(Placeholder, Addr); } else { // Store the RValue into the argument struct. llvm::Value *Addr = - Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex()); + Builder.CreateStructGEP(ArgMemory->getAllocatedType(), ArgMemory, + ArgInfo.getInAllocaFieldIndex()); unsigned AS = Addr->getType()->getPointerAddressSpace(); llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS); // There are some cases where a trivial bitcast is not avoidable. 
The @@ -3100,8 +3177,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, : 0); if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) || (ArgInfo.getIndirectByVal() && TypeAlign.getQuantity() < Align && - llvm::getOrEnforceKnownAlignment(Addr, Align, TD) < Align) || - (ArgInfo.getIndirectByVal() && (RVAddrSpace != ArgAddrSpace))) { + llvm::getOrEnforceKnownAlignment(Addr, Align, *TD) < Align) || + (ArgInfo.getIndirectByVal() && (RVAddrSpace != ArgAddrSpace))) { // Create an aligned temporary, and copy to it. llvm::AllocaInst *AI = CreateMemTemp(I->Ty); if (Align > AI->getAlignment()) @@ -3158,7 +3235,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, // If the value is offset in memory, apply the offset now. if (unsigned Offs = ArgInfo.getDirectOffset()) { SrcPtr = Builder.CreateBitCast(SrcPtr, Builder.getInt8PtrTy()); - SrcPtr = Builder.CreateConstGEP1_32(SrcPtr, Offs); + SrcPtr = Builder.CreateConstGEP1_32(Builder.getInt8Ty(), SrcPtr, Offs); SrcPtr = Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(ArgInfo.getCoerceToType())); @@ -3190,7 +3267,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, assert(NumIRArgs == STy->getNumElements()); for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { - llvm::Value *EltPtr = Builder.CreateConstGEP2_32(SrcPtr, 0, i); + llvm::Value *EltPtr = Builder.CreateConstGEP2_32(STy, SrcPtr, 0, i); llvm::LoadInst *LI = Builder.CreateLoad(EltPtr); // We don't know what we're loading from. LI->setAlignment(1); @@ -3300,7 +3377,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, llvm::BasicBlock *InvokeDest = nullptr; if (!Attrs.hasAttribute(llvm::AttributeSet::FunctionIndex, - llvm::Attribute::NoUnwind)) + llvm::Attribute::NoUnwind) || + currentFunctionUsesSEHTry()) InvokeDest = getInvokeDest(); llvm::CallSite CS; @@ -3320,6 +3398,12 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, Attrs.addAttribute(getLLVMContext(), llvm::AttributeSet::FunctionIndex, llvm::Attribute::AlwaysInline); + // Disable inlining inside SEH __try blocks. 
+ if (isSEHTryScope()) + Attrs = + Attrs.addAttribute(getLLVMContext(), llvm::AttributeSet::FunctionIndex, + llvm::Attribute::NoInline); + CS.setAttributes(Attrs); CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv)); @@ -3413,7 +3497,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, llvm::Value *StorePtr = DestPtr; if (unsigned Offs = RetAI.getDirectOffset()) { StorePtr = Builder.CreateBitCast(StorePtr, Builder.getInt8PtrTy()); - StorePtr = Builder.CreateConstGEP1_32(StorePtr, Offs); + StorePtr = + Builder.CreateConstGEP1_32(Builder.getInt8Ty(), StorePtr, Offs); StorePtr = Builder.CreateBitCast(StorePtr, llvm::PointerType::getUnqual(RetAI.getCoerceToType())); } diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGClass.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGClass.cpp index d72eda9..cd75da2 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CGClass.cpp +++ b/contrib/llvm/tools/clang/lib/CodeGen/CGClass.cpp @@ -24,35 +24,36 @@ #include "clang/Basic/TargetBuiltins.h" #include "clang/CodeGen/CGFunctionInfo.h" #include "clang/Frontend/CodeGenOptions.h" +#include "llvm/IR/Intrinsics.h" using namespace clang; using namespace CodeGen; -static CharUnits -ComputeNonVirtualBaseClassOffset(ASTContext &Context, +static CharUnits +ComputeNonVirtualBaseClassOffset(ASTContext &Context, const CXXRecordDecl *DerivedClass, CastExpr::path_const_iterator Start, CastExpr::path_const_iterator End) { CharUnits Offset = CharUnits::Zero(); - + const CXXRecordDecl *RD = DerivedClass; - + for (CastExpr::path_const_iterator I = Start; I != End; ++I) { const CXXBaseSpecifier *Base = *I; assert(!Base->isVirtual() && "Should not see virtual bases here!"); // Get the layout. const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); - - const CXXRecordDecl *BaseDecl = + + const CXXRecordDecl *BaseDecl = cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl()); - + // Add the offset. Offset += Layout.getBaseClassOffset(BaseDecl); - + RD = BaseDecl; } - + return Offset; } @@ -62,15 +63,15 @@ CodeGenModule::GetNonVirtualBaseClassOffset(const CXXRecordDecl *ClassDecl, CastExpr::path_const_iterator PathEnd) { assert(PathBegin != PathEnd && "Base path should not be empty!"); - CharUnits Offset = + CharUnits Offset = ComputeNonVirtualBaseClassOffset(getContext(), ClassDecl, PathBegin, PathEnd); if (Offset.isZero()) return nullptr; - llvm::Type *PtrDiffTy = + llvm::Type *PtrDiffTy = Types.ConvertType(getContext().getPointerDiffType()); - + return llvm::ConstantInt::get(PtrDiffTy, Offset.getQuantity()); } @@ -127,7 +128,7 @@ ApplyNonVirtualAndVirtualOffset(CodeGenFunction &CGF, llvm::Value *ptr, } else { baseOffset = virtualOffset; } - + // Apply the base offset. ptr = CGF.Builder.CreateBitCast(ptr, CGF.Int8PtrTy); ptr = CGF.Builder.CreateInBoundsGEP(ptr, baseOffset, "add.ptr"); @@ -149,7 +150,7 @@ llvm::Value *CodeGenFunction::GetAddressOfBaseClass( // *start* with a step down to the correct virtual base subobject, // and hence will not require any further steps. if ((*Start)->isVirtual()) { - VBase = + VBase = cast<CXXRecordDecl>((*Start)->getType()->getAs<RecordType>()->getDecl()); ++Start; } @@ -157,7 +158,7 @@ llvm::Value *CodeGenFunction::GetAddressOfBaseClass( // Compute the static offset of the ultimate destination within its // allocating subobject (the virtual base, if there is one, or else // the "complete" object that we see). - CharUnits NonVirtualOffset = + CharUnits NonVirtualOffset = ComputeNonVirtualBaseClassOffset(getContext(), VBase ? 
VBase : Derived, Start, PathEnd); @@ -172,7 +173,7 @@ llvm::Value *CodeGenFunction::GetAddressOfBaseClass( } // Get the base pointer type. - llvm::Type *BasePtrTy = + llvm::Type *BasePtrTy = ConvertType((PathEnd[-1])->getType())->getPointerTo(); QualType DerivedTy = getContext().getRecordType(Derived); @@ -197,7 +198,7 @@ llvm::Value *CodeGenFunction::GetAddressOfBaseClass( origBB = Builder.GetInsertBlock(); llvm::BasicBlock *notNullBB = createBasicBlock("cast.notnull"); endBB = createBasicBlock("cast.end"); - + llvm::Value *isNull = Builder.CreateIsNull(Value); Builder.CreateCondBr(isNull, endBB, notNullBB); EmitBlock(notNullBB); @@ -216,10 +217,10 @@ llvm::Value *CodeGenFunction::GetAddressOfBaseClass( } // Apply both offsets. - Value = ApplyNonVirtualAndVirtualOffset(*this, Value, + Value = ApplyNonVirtualAndVirtualOffset(*this, Value, NonVirtualOffset, VirtualOffset); - + // Cast to the destination type. Value = Builder.CreateBitCast(Value, BasePtrTy); @@ -228,13 +229,13 @@ llvm::Value *CodeGenFunction::GetAddressOfBaseClass( llvm::BasicBlock *notNullBB = Builder.GetInsertBlock(); Builder.CreateBr(endBB); EmitBlock(endBB); - + llvm::PHINode *PHI = Builder.CreatePHI(BasePtrTy, 2, "cast.result"); PHI->addIncoming(Value, notNullBB); PHI->addIncoming(llvm::Constant::getNullValue(BasePtrTy), origBB); Value = PHI; } - + return Value; } @@ -252,7 +253,7 @@ CodeGenFunction::GetAddressOfDerivedClass(llvm::Value *Value, llvm::Value *NonVirtualOffset = CGM.GetNonVirtualBaseClassOffset(Derived, PathBegin, PathEnd); - + if (!NonVirtualOffset) { // No offset, we can just cast back. return Builder.CreateBitCast(Value, DerivedPtrTy); @@ -266,12 +267,12 @@ CodeGenFunction::GetAddressOfDerivedClass(llvm::Value *Value, CastNull = createBasicBlock("cast.null"); CastNotNull = createBasicBlock("cast.notnull"); CastEnd = createBasicBlock("cast.end"); - + llvm::Value *IsNull = Builder.CreateIsNull(Value); Builder.CreateCondBr(IsNull, CastNull, CastNotNull); EmitBlock(CastNotNull); } - + // Apply the offset. Value = Builder.CreateBitCast(Value, Int8PtrTy); Value = Builder.CreateGEP(Value, Builder.CreateNeg(NonVirtualOffset), @@ -285,14 +286,14 @@ CodeGenFunction::GetAddressOfDerivedClass(llvm::Value *Value, EmitBlock(CastNull); Builder.CreateBr(CastEnd); EmitBlock(CastEnd); - + llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2); PHI->addIncoming(Value, CastNotNull); - PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()), + PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()), CastNull); Value = PHI; } - + return Value; } @@ -303,7 +304,7 @@ llvm::Value *CodeGenFunction::GetVTTParameter(GlobalDecl GD, // This constructor/destructor does not need a VTT parameter. return nullptr; } - + const CXXRecordDecl *RD = cast<CXXMethodDecl>(CurCodeDecl)->getParent(); const CXXRecordDecl *Base = cast<CXXMethodDecl>(GD.getDecl())->getParent(); @@ -323,15 +324,15 @@ llvm::Value *CodeGenFunction::GetVTTParameter(GlobalDecl GD, SubVTTIndex = 0; } else { const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); - CharUnits BaseOffset = ForVirtualBase ? - Layout.getVBaseClassOffset(Base) : + CharUnits BaseOffset = ForVirtualBase ? + Layout.getVBaseClassOffset(Base) : Layout.getBaseClassOffset(Base); - SubVTTIndex = + SubVTTIndex = CGM.getVTables().getSubVTTIndex(RD, BaseSubobject(Base, BaseOffset)); assert(SubVTTIndex != 0 && "Sub-VTT index must be greater than zero!"); } - + if (CGM.getCXXABI().NeedsVTTParameter(CurGD)) { // A VTT parameter was passed to the constructor, use it. 
VTT = LoadCXXVTT(); @@ -358,7 +359,7 @@ namespace { cast<CXXMethodDecl>(CGF.CurCodeDecl)->getParent(); const CXXDestructorDecl *D = BaseClass->getDestructor(); - llvm::Value *Addr = + llvm::Value *Addr = CGF.GetAddressOfDirectBaseInCompleteClass(CGF.LoadCXXThis(), DerivedClass, BaseClass, BaseIsVirtual); @@ -391,7 +392,7 @@ static bool BaseInitializerUsesThis(ASTContext &C, const Expr *Init) { return Checker.UsesThis; } -static void EmitBaseInitializer(CodeGenFunction &CGF, +static void EmitBaseInitializer(CodeGenFunction &CGF, const CXXRecordDecl *ClassDecl, CXXCtorInitializer *BaseInit, CXXCtorType CtorType) { @@ -399,7 +400,7 @@ static void EmitBaseInitializer(CodeGenFunction &CGF, "Must have base initializer!"); llvm::Value *ThisPtr = CGF.LoadCXXThis(); - + const Type *BaseType = BaseInit->getBaseClass(); CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(BaseType->getAs<RecordType>()->getDecl()); @@ -418,7 +419,7 @@ static void EmitBaseInitializer(CodeGenFunction &CGF, // We can pretend to be a complete class because it only matters for // virtual bases, and we only do virtual bases for complete ctors. - llvm::Value *V = + llvm::Value *V = CGF.GetAddressOfDirectBaseInCompleteClass(ThisPtr, ClassDecl, BaseClassDecl, isBaseVirtual); @@ -430,8 +431,8 @@ static void EmitBaseInitializer(CodeGenFunction &CGF, AggValueSlot::IsNotAliased); CGF.EmitAggExpr(BaseInit->getInit(), AggSlot); - - if (CGF.CGM.getLangOpts().Exceptions && + + if (CGF.CGM.getLangOpts().Exceptions && !BaseClassDecl->hasTrivialDestructor()) CGF.EHStack.pushCleanup<CallBaseDtor>(EHCleanup, BaseClassDecl, isBaseVirtual); @@ -490,17 +491,17 @@ static void EmitAggMemberInitializer(CodeGenFunction &CGF, llvm::Value *IndexVar = CGF.GetAddrOfLocalVar(ArrayIndexes[Index]); assert(IndexVar && "Array index variable not loaded"); - + // Initialize this index variable to zero. llvm::Value* Zero = llvm::Constant::getNullValue( CGF.ConvertType(CGF.getContext().getSizeType())); CGF.Builder.CreateStore(Zero, IndexVar); - + // Start the loop with a block that tests the condition. llvm::BasicBlock *CondBlock = CGF.createBasicBlock("for.cond"); llvm::BasicBlock *AfterFor = CGF.createBasicBlock("for.end"); - + CGF.EmitBlock(CondBlock); llvm::BasicBlock *ForBody = CGF.createBasicBlock("for.body"); @@ -512,7 +513,7 @@ static void EmitAggMemberInitializer(CodeGenFunction &CGF, llvm::ConstantInt::get(Counter->getType(), NumElements); llvm::Value *IsLess = CGF.Builder.CreateICmpULT(Counter, NumElementsPtr, "isless"); - + // If the condition is true, execute the body. CGF.Builder.CreateCondBr(IsLess, ForBody, AfterFor); @@ -539,6 +540,23 @@ static void EmitAggMemberInitializer(CodeGenFunction &CGF, CGF.EmitBlock(AfterFor, true); } +static bool isMemcpyEquivalentSpecialMember(const CXXMethodDecl *D) { + auto *CD = dyn_cast<CXXConstructorDecl>(D); + if (!(CD && CD->isCopyOrMoveConstructor()) && + !D->isCopyAssignmentOperator() && !D->isMoveAssignmentOperator()) + return false; + + // We can emit a memcpy for a trivial copy or move constructor/assignment. + if (D->isTrivial() && !D->getParent()->mayInsertExtraPadding()) + return true; + + // We *must* emit a memcpy for a defaulted union copy or move op. 
+ if (D->getParent()->isUnion() && D->isDefaulted()) + return true; + + return false; +} + static void EmitMemberInitializer(CodeGenFunction &CGF, const CXXRecordDecl *ClassDecl, CXXCtorInitializer *MemberInit, @@ -548,7 +566,7 @@ static void EmitMemberInitializer(CodeGenFunction &CGF, assert(MemberInit->isAnyMemberInitializer() && "Must have member initializer!"); assert(MemberInit->getInit() && "Must have initializer!"); - + // non-static data member initializers. FieldDecl *Field = MemberInit->getAnyMember(); QualType FieldType = Field->getType(); @@ -580,14 +598,14 @@ static void EmitMemberInitializer(CodeGenFunction &CGF, QualType BaseElementTy = CGF.getContext().getBaseElementType(Array); CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(MemberInit->getInit()); if (BaseElementTy.isPODType(CGF.getContext()) || - (CE && CE->getConstructor()->isTrivial())) { + (CE && isMemcpyEquivalentSpecialMember(CE->getConstructor()))) { unsigned SrcArgIndex = CGF.CGM.getCXXABI().getSrcArgforCopyCtor(Constructor, Args); llvm::Value *SrcPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(Args[SrcArgIndex])); LValue ThisRHSLV = CGF.MakeNaturalAlignAddrLValue(SrcPtr, RecordTy); LValue Src = CGF.EmitLValueForFieldInitialization(ThisRHSLV, Field); - + // Copy the aggregate. CGF.EmitAggregateCopy(LHS.getAddress(), Src.getAddress(), FieldType, LHS.isVolatileQualified()); @@ -621,28 +639,28 @@ void CodeGenFunction::EmitInitializerForField( llvm::Value *ArrayIndexVar = nullptr; if (ArrayIndexes.size()) { llvm::Type *SizeTy = ConvertType(getContext().getSizeType()); - + // The LHS is a pointer to the first object we'll be constructing, as // a flat array. QualType BaseElementTy = getContext().getBaseElementType(FieldType); llvm::Type *BasePtr = ConvertType(BaseElementTy); BasePtr = llvm::PointerType::getUnqual(BasePtr); - llvm::Value *BaseAddrPtr = Builder.CreateBitCast(LHS.getAddress(), + llvm::Value *BaseAddrPtr = Builder.CreateBitCast(LHS.getAddress(), BasePtr); LHS = MakeAddrLValue(BaseAddrPtr, BaseElementTy); - + // Create an array index that will be used to walk over all of the // objects we're constructing. ArrayIndexVar = CreateTempAlloca(SizeTy, "object.index"); llvm::Value *Zero = llvm::Constant::getNullValue(SizeTy); Builder.CreateStore(Zero, ArrayIndexVar); - - + + // Emit the block variables for the array indices, if any. 
for (unsigned I = 0, N = ArrayIndexes.size(); I != N; ++I) EmitAutoVarDecl(*ArrayIndexes[I]); } - + EmitAggMemberInitializer(*this, LHS, Init, ArrayIndexVar, FieldType, ArrayIndexes, 0); } @@ -762,9 +780,9 @@ void CodeGenFunction::EmitAsanPrologueOrEpilogue(bool Prologue) { if (PoisonSize < AsanAlignment || !SSV[i].Size || (NextField % AsanAlignment) != 0) continue; - Builder.CreateCall2( - F, Builder.CreateAdd(ThisPtr, Builder.getIntN(PtrSize, EndOffset)), - Builder.getIntN(PtrSize, PoisonSize)); + Builder.CreateCall( + F, {Builder.CreateAdd(ThisPtr, Builder.getIntN(PtrSize, EndOffset)), + Builder.getIntN(PtrSize, PoisonSize)}); } } @@ -796,8 +814,7 @@ void CodeGenFunction::EmitConstructorBody(FunctionArgList &Args) { if (IsTryBody) EnterCXXTryStmt(*cast<CXXTryStmt>(Body), true); - RegionCounter Cnt = getPGORegionCounter(Body); - Cnt.beginRegion(Builder); + incrementProfileCounter(Body); RunCleanupsScope RunCleanups(*this); @@ -850,7 +867,7 @@ namespace { public: FieldMemcpyizer(CodeGenFunction &CGF, const CXXRecordDecl *ClassDecl, const VarDecl *SrcRec) - : CGF(CGF), ClassDecl(ClassDecl), SrcRec(SrcRec), + : CGF(CGF), ClassDecl(ClassDecl), SrcRec(SrcRec), RecLayout(CGF.getContext().getASTRecordLayout(ClassDecl)), FirstField(nullptr), LastField(nullptr), FirstFieldOffset(0), LastFieldOffset(0), LastAddedFieldIndex(0) {} @@ -876,7 +893,7 @@ namespace { unsigned LastFieldSize = LastField->isBitField() ? LastField->getBitWidthValue(CGF.getContext()) : - CGF.getContext().getTypeSize(LastField->getType()); + CGF.getContext().getTypeSize(LastField->getType()); uint64_t MemcpySizeBits = LastFieldOffset + LastFieldSize - FirstByteOffset + CGF.getContext().getCharWidth() - 1; @@ -1021,8 +1038,8 @@ namespace { QualType FieldType = Field->getType(); CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(MemberInit->getInit()); - // Bail out on non-POD, not-trivially-constructable members. - if (!(CE && CE->getConstructor()->isTrivial()) && + // Bail out on non-memcpyable, not-trivially-copyable members. 
+ if (!(CE && isMemcpyEquivalentSpecialMember(CE->getConstructor())) && !(FieldType.isTriviallyCopyableType(CGF.getContext()) || FieldType->isReferenceType())) return false; @@ -1127,9 +1144,7 @@ namespace { return Field; } else if (CXXMemberCallExpr *MCE = dyn_cast<CXXMemberCallExpr>(S)) { CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MCE->getCalleeDecl()); - if (!(MD && (MD->isCopyAssignmentOperator() || - MD->isMoveAssignmentOperator()) && - MD->isTrivial())) + if (!(MD && isMemcpyEquivalentSpecialMember(MD))) return nullptr; MemberExpr *IOA = dyn_cast<MemberExpr>(MCE->getImplicitObjectArgument()); if (!IOA) @@ -1189,7 +1204,7 @@ namespace { if (F) { addMemcpyableField(F); AggregatedStmts.push_back(S); - } else { + } else { emitAggregatedStmts(); CGF.EmitStmt(S); } @@ -1274,7 +1289,7 @@ static bool FieldHasTrivialDestructorBody(ASTContext &Context, const FieldDecl *Field); static bool -HasTrivialDestructorBody(ASTContext &Context, +HasTrivialDestructorBody(ASTContext &Context, const CXXRecordDecl *BaseClassDecl, const CXXRecordDecl *MostDerivedClassDecl) { @@ -1309,7 +1324,7 @@ HasTrivialDestructorBody(ASTContext &Context, cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl()); if (!HasTrivialDestructorBody(Context, VirtualBase, MostDerivedClassDecl)) - return false; + return false; } } @@ -1325,7 +1340,7 @@ FieldHasTrivialDestructorBody(ASTContext &Context, const RecordType *RT = FieldBaseElementType->getAs<RecordType>(); if (!RT) return true; - + CXXRecordDecl *FieldClassDecl = cast<CXXRecordDecl>(RT->getDecl()); return HasTrivialDestructorBody(Context, FieldClassDecl, FieldClassDecl); } @@ -1351,6 +1366,10 @@ void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) { const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CurGD.getDecl()); CXXDtorType DtorType = CurGD.getDtorType(); + Stmt *Body = Dtor->getBody(); + if (Body) + incrementProfileCounter(Body); + // The call to operator delete in a deleting destructor happens // outside of the function-try-block, which means it's always // possible to delegate the destructor body to the complete @@ -1363,8 +1382,6 @@ void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) { return; } - Stmt *Body = Dtor->getBody(); - // If the body is a function-try-block, enter the try before // anything else. bool isTryBody = (Body && isa<CXXTryStmt>(Body)); @@ -1374,11 +1391,11 @@ void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) { // Enter the epilogue cleanups. RunCleanupsScope DtorEpilogue(*this); - + // If this is the complete variant, just invoke the base variant; // the epilogue will destruct the virtual bases. But we can't do // this optimization if the body is a function-try-block, because - // we'd introduce *two* handler blocks. In the Microsoft ABI, we + // we'd introduce *two* handler blocks. In the Microsoft ABI, we // always delegate because we might not have a definition in this TU. switch (DtorType) { case Dtor_Comdat: @@ -1399,13 +1416,10 @@ void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) { break; } // Fallthrough: act like we're in the base variant. - + case Dtor_Base: assert(Body); - RegionCounter Cnt = getPGORegionCounter(Body); - Cnt.beginRegion(Builder); - // Enter the cleanup scopes for fields and non-virtual bases. 
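The delegation logic above declines to forward the complete destructor to the base variant when the body is a function-try-block, since nesting the base body under the complete variant's try would create two handler blocks for a single written handler. The construct in question, as a stand-alone example:

    #include <cstdio>

    struct Member {
      ~Member() { std::puts("member destroyed"); }
    };

    struct S {
      Member M;
      // A function-try-block destructor: the handler also covers the implicit
      // destruction of members and bases, which is why the complete-object
      // variant cannot simply call the base variant inside its own try.
      ~S() try {
        std::puts("destructor body");
      } catch (...) {
        std::puts("handler");
      }
    };

    int main() { S s; }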
EnterDtorCleanups(Dtor, Dtor_Base); @@ -1447,7 +1461,7 @@ void CodeGenFunction::emitImplicitAssignmentOperatorBody(FunctionArgList &Args) AssignmentMemcpyizer AM(*this, AssignOp, Args); for (auto *I : RootCS->body()) - AM.emitAssignment(I); + AM.emitAssignment(I); AM.finish(); } @@ -1508,7 +1522,7 @@ namespace { LValue ThisLV = CGF.MakeAddrLValue(thisValue, RecordTy); LValue LV = CGF.EmitLValueForField(ThisLV, field); assert(LV.isSimple()); - + CGF.emitDestroy(LV.getAddress(), field->getType(), destroyer, flags.isForNormalCleanup() && useEHCleanupForArray); } @@ -1526,7 +1540,7 @@ void CodeGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD, // The deleting-destructor phase just needs to call the appropriate // operator delete that Sema picked up. if (DtorType == Dtor_Deleting) { - assert(DD->getOperatorDelete() && + assert(DD->getOperatorDelete() && "operator delete missing - EnterDtorCleanups"); if (CXXStructorImplicitParamValue) { // If there is an implicit param to the deleting dtor, it's a boolean @@ -1553,7 +1567,7 @@ void CodeGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD, for (const auto &Base : ClassDecl->vbases()) { CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl()); - + // Ignore trivial destructors. if (BaseClassDecl->hasTrivialDestructor()) continue; @@ -1567,15 +1581,15 @@ void CodeGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD, } assert(DtorType == Dtor_Base); - + // Destroy non-virtual bases. for (const auto &Base : ClassDecl->bases()) { // Ignore virtual bases. if (Base.isVirtual()) continue; - + CXXRecordDecl *BaseClassDecl = Base.getType()->getAsCXXRecordDecl(); - + // Ignore trivial destructors. if (BaseClassDecl->hasTrivialDestructor()) continue; @@ -1656,7 +1670,7 @@ void CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor, zeroCheckBranch = Builder.CreateCondBr(iszero, loopBB, loopBB); EmitBlock(loopBB); } - + // Find the end of the array. llvm::Value *arrayEnd = Builder.CreateInBoundsGEP(arrayBegin, numElements, "arrayctor.end"); @@ -1676,15 +1690,15 @@ void CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor, // Zero initialize the storage, if requested. if (zeroInitialize) EmitNullInitialization(cur, type); - - // C++ [class.temporary]p4: + + // C++ [class.temporary]p4: // There are two contexts in which temporaries are destroyed at a different // point than the end of the full-expression. The first context is when a - // default constructor is called to initialize an element of an array. - // If the constructor has one or more default arguments, the destruction of - // every temporary created in a default argument expression is sequenced + // default constructor is called to initialize an element of an array. + // If the constructor has one or more default arguments, the destruction of + // every temporary created in a default argument expression is sequenced // before the construction of the next array element, if any. - + { RunCleanupsScope Scope(*this); @@ -1733,33 +1747,32 @@ void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D, bool ForVirtualBase, bool Delegating, llvm::Value *This, const CXXConstructExpr *E) { - // If this is a trivial constructor, just emit what's needed. - if (D->isTrivial() && !D->getParent()->mayInsertExtraPadding()) { - if (E->getNumArgs() == 0) { - // Trivial default constructor, no codegen required. 
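The [class.temporary]p4 rule restated above is observable from source: when a default constructor with a default argument initializes an array, the temporaries created in the default argument die before the next element is constructed, which is exactly what the per-iteration RunCleanupsScope enforces. A small demonstration:

    #include <cstdio>

    struct Tmp {
      Tmp() { std::puts("temporary created"); }
      ~Tmp() { std::puts("temporary destroyed"); }
    };

    struct Elt {
      Elt(const Tmp & = Tmp()) { std::puts("element constructed"); }
    };

    int main() {
      // Prints created / constructed / destroyed for a[0], then again for a[1].
      Elt a[2];
      return 0;
    }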
- assert(D->isDefaultConstructor() && - "trivial 0-arg ctor not a default ctor"); - return; - } + // C++11 [class.mfct.non-static]p2: + // If a non-static member function of a class X is called for an object that + // is not of type X, or of a type derived from X, the behavior is undefined. + // FIXME: Provide a source location here. + EmitTypeCheck(CodeGenFunction::TCK_ConstructorCall, SourceLocation(), This, + getContext().getRecordType(D->getParent())); + + if (D->isTrivial() && D->isDefaultConstructor()) { + assert(E->getNumArgs() == 0 && "trivial default ctor with args"); + return; + } + // If this is a trivial constructor, just emit what's needed. If this is a + // union copy constructor, we must emit a memcpy, because the AST does not + // model that copy. + if (isMemcpyEquivalentSpecialMember(D)) { assert(E->getNumArgs() == 1 && "unexpected argcount for trivial ctor"); - assert(D->isCopyOrMoveConstructor() && - "trivial 1-arg ctor not a copy/move ctor"); const Expr *Arg = E->getArg(0); - QualType Ty = Arg->getType(); + QualType SrcTy = Arg->getType(); llvm::Value *Src = EmitLValue(Arg).getAddress(); - EmitAggregateCopy(This, Src, Ty); + QualType DestTy = getContext().getTypeDeclType(D->getParent()); + EmitAggregateCopyCtor(This, Src, DestTy, SrcTy); return; } - // C++11 [class.mfct.non-static]p2: - // If a non-static member function of a class X is called for an object that - // is not of type X, or of a type derived from X, the behavior is undefined. - // FIXME: Provide a source location here. - EmitTypeCheck(CodeGenFunction::TCK_ConstructorCall, SourceLocation(), This, - getContext().getRecordType(D->getParent())); - CallArgList Args; // Push the this ptr. @@ -1784,25 +1797,26 @@ void CodeGenFunction::EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D, llvm::Value *This, llvm::Value *Src, const CXXConstructExpr *E) { - if (D->isTrivial() && - !D->getParent()->mayInsertExtraPadding()) { + if (isMemcpyEquivalentSpecialMember(D)) { assert(E->getNumArgs() == 1 && "unexpected argcount for trivial ctor"); assert(D->isCopyOrMoveConstructor() && "trivial 1-arg ctor not a copy/move ctor"); - EmitAggregateCopy(This, Src, E->arg_begin()->getType()); + EmitAggregateCopyCtor(This, Src, + getContext().getTypeDeclType(D->getParent()), + E->arg_begin()->getType()); return; } llvm::Value *Callee = CGM.getAddrOfCXXStructor(D, StructorType::Complete); assert(D->isInstance() && "Trying to emit a member call expr on a static method!"); - + const FunctionProtoType *FPT = D->getType()->castAs<FunctionProtoType>(); - + CallArgList Args; - + // Push the this ptr. Args.add(RValue::get(This), D->getThisType(getContext())); - + // Push the src ptr. QualType QT = *(FPT->param_type_begin()); llvm::Type *t = CGM.getTypes().ConvertType(QT); @@ -1945,10 +1959,18 @@ void CodeGenFunction::PushDestructorCleanup(QualType T, llvm::Value *Addr) { } void -CodeGenFunction::InitializeVTablePointer(BaseSubobject Base, +CodeGenFunction::InitializeVTablePointer(BaseSubobject Base, const CXXRecordDecl *NearestVBase, CharUnits OffsetFromNearestVBase, const CXXRecordDecl *VTableClass) { + const CXXRecordDecl *RD = Base.getBase(); + + // Don't initialize the vtable pointer if the class is marked with the + // 'novtable' attribute. + if ((RD == VTableClass || RD == NearestVBase) && + VTableClass->hasAttr<MSNoVTableAttr>()) + return; + // Compute the address point. 
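The early return added to InitializeVTablePointer implements the Microsoft novtable extension: for a class declared __declspec(novtable), constructors and destructors of the class itself skip the vptr store, so the vtable can be discarded when the class is only ever used as an abstract base. An illustrative use, meaningful only when targeting the Microsoft C++ ABI (for example with clang-cl):

    struct __declspec(novtable) IShape {
      virtual double area() const = 0;
    protected:
      ~IShape() = default; // non-virtual: never delete through an IShape*
    };

    struct Circle : IShape {
      double R;
      explicit Circle(double R) : R(R) {}
      // Circle's constructor installs the only vptr this object ever needs;
      // the store that IShape's constructor would perform is omitted.
      double area() const override { return 3.141592653589793 * R * R; }
    };

    double fourPi() {
      Circle C(2.0);
      return C.area();
    }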
bool NeedsVirtualOffset; llvm::Value *VTableAddressPoint = @@ -1960,7 +1982,7 @@ CodeGenFunction::InitializeVTablePointer(BaseSubobject Base, // Compute where to store the address point. llvm::Value *VirtualOffset = nullptr; CharUnits NonVirtualOffset = CharUnits::Zero(); - + if (NeedsVirtualOffset) { // We need to use the virtual base offset offset because the virtual base // might have a different offset in the most derived class. @@ -1973,12 +1995,12 @@ CodeGenFunction::InitializeVTablePointer(BaseSubobject Base, // We can just use the base offset in the complete class. NonVirtualOffset = Base.getBaseOffset(); } - + // Apply the offsets. llvm::Value *VTableField = LoadCXXThis(); - + if (!NonVirtualOffset.isZero() || VirtualOffset) - VTableField = ApplyNonVirtualAndVirtualOffset(*this, VTableField, + VTableField = ApplyNonVirtualAndVirtualOffset(*this, VTableField, NonVirtualOffset, VirtualOffset); @@ -1995,7 +2017,7 @@ CodeGenFunction::InitializeVTablePointer(BaseSubobject Base, } void -CodeGenFunction::InitializeVTablePointers(BaseSubobject Base, +CodeGenFunction::InitializeVTablePointers(BaseSubobject Base, const CXXRecordDecl *NearestVBase, CharUnits OffsetFromNearestVBase, bool BaseIsNonVirtualPrimaryBase, @@ -2008,7 +2030,7 @@ CodeGenFunction::InitializeVTablePointers(BaseSubobject Base, InitializeVTablePointer(Base, NearestVBase, OffsetFromNearestVBase, VTableClass); } - + const CXXRecordDecl *RD = Base.getBase(); // Traverse bases. @@ -2029,7 +2051,7 @@ CodeGenFunction::InitializeVTablePointers(BaseSubobject Base, if (!VBases.insert(BaseDecl).second) continue; - const ASTRecordLayout &Layout = + const ASTRecordLayout &Layout = getContext().getASTRecordLayout(VTableClass); BaseOffset = Layout.getVBaseClassOffset(BaseDecl); @@ -2039,15 +2061,15 @@ CodeGenFunction::InitializeVTablePointers(BaseSubobject Base, const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); BaseOffset = Base.getBaseOffset() + Layout.getBaseClassOffset(BaseDecl); - BaseOffsetFromNearestVBase = + BaseOffsetFromNearestVBase = OffsetFromNearestVBase + Layout.getBaseClassOffset(BaseDecl); BaseDeclIsNonVirtualPrimaryBase = Layout.getPrimaryBase() == BaseDecl; } - - InitializeVTablePointers(BaseSubobject(BaseDecl, BaseOffset), + + InitializeVTablePointers(BaseSubobject(BaseDecl, BaseOffset), I.isVirtual() ? BaseDecl : NearestVBase, BaseOffsetFromNearestVBase, - BaseDeclIsNonVirtualPrimaryBase, + BaseDeclIsNonVirtualPrimaryBase, VTableClass, VBases); } } @@ -2059,7 +2081,7 @@ void CodeGenFunction::InitializeVTablePointers(const CXXRecordDecl *RD) { // Initialize the vtable pointers for this class and all of its bases. VisitedVirtualBasesSetTy VBases; - InitializeVTablePointers(BaseSubobject(RD, CharUnits::Zero()), + InitializeVTablePointers(BaseSubobject(RD, CharUnits::Zero()), /*NearestVBase=*/nullptr, /*OffsetFromNearestVBase=*/CharUnits::Zero(), /*BaseIsNonVirtualPrimaryBase=*/false, RD, VBases); @@ -2076,6 +2098,128 @@ llvm::Value *CodeGenFunction::GetVTablePtr(llvm::Value *This, return VTable; } +// If a class has a single non-virtual base and does not introduce or override +// virtual member functions or fields, it will have the same layout as its base. +// This function returns the least derived such class. +// +// Casting an instance of a base class to such a derived class is technically +// undefined behavior, but it is a relatively common hack for introducing member +// functions on class instances with specific properties (e.g. 
llvm::Operator) +// that works under most compilers and should not have security implications, so +// we allow it by default. It can be disabled with -fsanitize=cfi-cast-strict. +static const CXXRecordDecl * +LeastDerivedClassWithSameLayout(const CXXRecordDecl *RD) { + if (!RD->field_empty()) + return RD; + + if (RD->getNumVBases() != 0) + return RD; + + if (RD->getNumBases() != 1) + return RD; + + for (const CXXMethodDecl *MD : RD->methods()) { + if (MD->isVirtual()) { + // Virtual member functions are only ok if they are implicit destructors + // because the implicit destructor will have the same semantics as the + // base class's destructor if no fields are added. + if (isa<CXXDestructorDecl>(MD) && MD->isImplicit()) + continue; + return RD; + } + } + + return LeastDerivedClassWithSameLayout( + RD->bases_begin()->getType()->getAsCXXRecordDecl()); +} + +void CodeGenFunction::EmitVTablePtrCheckForCall(const CXXMethodDecl *MD, + llvm::Value *VTable) { + const CXXRecordDecl *ClassDecl = MD->getParent(); + if (!SanOpts.has(SanitizerKind::CFICastStrict)) + ClassDecl = LeastDerivedClassWithSameLayout(ClassDecl); + + EmitVTablePtrCheck(ClassDecl, VTable); +} + +void CodeGenFunction::EmitVTablePtrCheckForCast(QualType T, + llvm::Value *Derived, + bool MayBeNull) { + if (!getLangOpts().CPlusPlus) + return; + + auto *ClassTy = T->getAs<RecordType>(); + if (!ClassTy) + return; + + const CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(ClassTy->getDecl()); + + if (!ClassDecl->isCompleteDefinition() || !ClassDecl->isDynamicClass()) + return; + + SmallString<64> MangledName; + llvm::raw_svector_ostream Out(MangledName); + CGM.getCXXABI().getMangleContext().mangleCXXRTTI(T.getUnqualifiedType(), + Out); + + // Blacklist based on the mangled type. + if (CGM.getContext().getSanitizerBlacklist().isBlacklistedType(Out.str())) + return; + + if (!SanOpts.has(SanitizerKind::CFICastStrict)) + ClassDecl = LeastDerivedClassWithSameLayout(ClassDecl); + + llvm::BasicBlock *ContBlock = 0; + + if (MayBeNull) { + llvm::Value *DerivedNotNull = + Builder.CreateIsNotNull(Derived, "cast.nonnull"); + + llvm::BasicBlock *CheckBlock = createBasicBlock("cast.check"); + ContBlock = createBasicBlock("cast.cont"); + + Builder.CreateCondBr(DerivedNotNull, CheckBlock, ContBlock); + + EmitBlock(CheckBlock); + } + + llvm::Value *VTable = GetVTablePtr(Derived, Int8PtrTy); + EmitVTablePtrCheck(ClassDecl, VTable); + + if (MayBeNull) { + Builder.CreateBr(ContBlock); + EmitBlock(ContBlock); + } +} + +void CodeGenFunction::EmitVTablePtrCheck(const CXXRecordDecl *RD, + llvm::Value *VTable) { + // FIXME: Add blacklisting scheme. + if (RD->isInStdNamespace()) + return; + + std::string OutName; + llvm::raw_string_ostream Out(OutName); + CGM.getCXXABI().getMangleContext().mangleCXXVTableBitSet(RD, Out); + + llvm::Value *BitSetName = llvm::MetadataAsValue::get( + getLLVMContext(), llvm::MDString::get(getLLVMContext(), Out.str())); + + llvm::Value *BitSetTest = Builder.CreateCall( + CGM.getIntrinsic(llvm::Intrinsic::bitset_test), + {Builder.CreateBitCast(VTable, CGM.Int8PtrTy), BitSetName}); + + llvm::BasicBlock *ContBlock = createBasicBlock("vtable.check.cont"); + llvm::BasicBlock *TrapBlock = createBasicBlock("vtable.check.trap"); + + Builder.CreateCondBr(BitSetTest, ContBlock, TrapBlock); + + EmitBlock(TrapBlock); + Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::trap), {}); + Builder.CreateUnreachable(); + + EmitBlock(ContBlock); +} // FIXME: Ideally Expr::IgnoreParenNoopCasts should do this, but it doesn't do // quite what we want. 
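At the source level, the layout-compatible downcast that LeastDerivedClassWithSameLayout tolerates looks like the following; under -fsanitize=cfi the cast site is checked with the llvm.bitset.test machinery above, and only -fsanitize=cfi-cast-strict rejects the idiom (hypothetical types):

    struct Base {
      virtual void hook() {}
    };

    // Adds no fields and no virtual functions, so it shares Base's layout.
    // Casting a Base* that really points at a Base to Mixin* is formally UB,
    // but it is the common "member function mixin" hack described above.
    struct Mixin : Base {
      int helper() const { return 42; }
    };

    int use(Base *B) {
      // With CFI enabled this cast is tested against the bit set of Base,
      // the least derived class with the same layout, so it passes.
      return static_cast<Mixin *>(B)->helper();
    }

    int main() {
      Base B;
      return use(&B) == 42 ? 0 : 1;
    }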
@@ -2140,7 +2284,7 @@ CodeGenFunction::CanDevirtualizeMemberFunctionCall(const Expr *Base, // This is a record decl. We know the type and can devirtualize it. return VD->getType()->isRecordType(); } - + return false; } @@ -2154,14 +2298,14 @@ CodeGenFunction::CanDevirtualizeMemberFunctionCall(const Expr *Base, // We can always devirtualize calls on temporary object expressions. if (isa<CXXConstructExpr>(Base)) return true; - + // And calls on bound temporaries. if (isa<CXXBindTemporaryExpr>(Base)) return true; - + // Check if this is a call expr that returns a record type. if (const CallExpr *CE = dyn_cast<CallExpr>(Base)) - return CE->getCallReturnType()->isRecordType(); + return CE->getCallReturnType(getContext())->isRecordType(); // We can't devirtualize the call. return false; @@ -2190,7 +2334,7 @@ void CodeGenFunction::EmitForwardingCallToLambda( // We don't need to separately arrange the call arguments because // the call can't be variadic anyway --- it's impossible to forward // variadic arguments. - + // Now emit our call. RValue RV = EmitCall(calleeFnInfo, callee, returnSlot, callArgs, callOperator); @@ -2218,7 +2362,7 @@ void CodeGenFunction::EmitLambdaBlockInvokeBody() { for (auto param : BD->params()) EmitDelegateCallArg(CallArgs, param, param->getLocStart()); - assert(!Lambda->isGenericLambda() && + assert(!Lambda->isGenericLambda() && "generic lambda interconversion to block not implemented"); EmitForwardingCallToLambda(Lambda->getLambdaCallOperator(), CallArgs); } @@ -2256,7 +2400,7 @@ void CodeGenFunction::EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD) { const TemplateArgumentList *TAL = MD->getTemplateSpecializationArgs(); FunctionTemplateDecl *CallOpTemplate = CallOp->getDescribedFunctionTemplate(); void *InsertPos = nullptr; - FunctionDecl *CorrespondingCallOpSpecialization = + FunctionDecl *CorrespondingCallOpSpecialization = CallOpTemplate->findSpecialization(TAL->asArray(), InsertPos); assert(CorrespondingCallOpSpecialization); CallOp = cast<CXXMethodDecl>(CorrespondingCallOpSpecialization); diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCleanup.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGCleanup.cpp index 18ed3e5..d97e405 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CGCleanup.cpp +++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCleanup.cpp @@ -52,8 +52,10 @@ DominatingValue<RValue>::saved_type::save(CodeGenFunction &CGF, RValue rv) { llvm::StructType::get(V.first->getType(), V.second->getType(), (void*) nullptr); llvm::Value *addr = CGF.CreateTempAlloca(ComplexTy, "saved-complex"); - CGF.Builder.CreateStore(V.first, CGF.Builder.CreateStructGEP(addr, 0)); - CGF.Builder.CreateStore(V.second, CGF.Builder.CreateStructGEP(addr, 1)); + CGF.Builder.CreateStore(V.first, + CGF.Builder.CreateStructGEP(ComplexTy, addr, 0)); + CGF.Builder.CreateStore(V.second, + CGF.Builder.CreateStructGEP(ComplexTy, addr, 1)); return saved_type(addr, ComplexAddress); } @@ -82,9 +84,9 @@ RValue DominatingValue<RValue>::saved_type::restore(CodeGenFunction &CGF) { return RValue::getAggregate(CGF.Builder.CreateLoad(Value)); case ComplexAddress: { llvm::Value *real = - CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(Value, 0)); + CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(nullptr, Value, 0)); llvm::Value *imag = - CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(Value, 1)); + CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(nullptr, Value, 1)); return RValue::getComplex(real, imag); } } @@ -123,6 +125,17 @@ char *EHScopeStack::allocate(size_t Size) { return 
StartOfData; } +bool EHScopeStack::containsOnlyLifetimeMarkers( + EHScopeStack::stable_iterator Old) const { + for (EHScopeStack::iterator it = begin(); stabilize(it) != Old; it++) { + EHCleanupScope *cleanup = dyn_cast<EHCleanupScope>(&*it); + if (!cleanup || !cleanup->isLifetimeMarker()) + return false; + } + + return true; +} + EHScopeStack::stable_iterator EHScopeStack::getInnermostActiveNormalCleanup() const { for (stable_iterator si = getInnermostNormalCleanup(), se = stable_end(); @@ -469,8 +482,14 @@ static void EmitCleanup(CodeGenFunction &CGF, EHScopeStack::Cleanup *Fn, EHScopeStack::Cleanup::Flags flags, llvm::Value *ActiveFlag) { - // EH cleanups always occur within a terminate scope. - if (flags.isForEHCleanup()) CGF.EHStack.pushTerminate(); + // Itanium EH cleanups occur within a terminate scope. Microsoft SEH doesn't + // have this behavior, and the Microsoft C++ runtime will call terminate for + // us if the cleanup throws. + bool PushedTerminate = false; + if (flags.isForEHCleanup() && !CGF.getTarget().getCXXABI().isMicrosoft()) { + CGF.EHStack.pushTerminate(); + PushedTerminate = true; + } // If there's an active flag, load it and skip the cleanup if it's // false. @@ -493,7 +512,8 @@ static void EmitCleanup(CodeGenFunction &CGF, CGF.EmitBlock(ContBB); // Leave the terminate scope. - if (flags.isForEHCleanup()) CGF.EHStack.popTerminate(); + if (PushedTerminate) + CGF.EHStack.popTerminate(); } static void ForwardPrebranchedFallthrough(llvm::BasicBlock *Exit, @@ -739,7 +759,15 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) { Scope.getNumBranchAfters() == 1) { assert(!BranchThroughDest || !IsActive); - // TODO: clean up the possibly dead stores to the cleanup dest slot. + // Clean up the possibly dead store to the cleanup dest slot. + llvm::Instruction *NormalCleanupDestSlot = + cast<llvm::Instruction>(getNormalCleanupDestSlot()); + if (NormalCleanupDestSlot->hasOneUse()) { + NormalCleanupDestSlot->user_back()->eraseFromParent(); + NormalCleanupDestSlot->eraseFromParent(); + NormalCleanupDest = nullptr; + } + llvm::BasicBlock *BranchAfter = Scope.getBranchAfterBlock(0); InstsToAppend.push_back(llvm::BranchInst::Create(BranchAfter)); @@ -861,8 +889,6 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) { // Emit the EH cleanup if required. if (RequiresEHCleanup) { - ApplyDebugLocation AutoRestoreLocation(*this, CurEHLocation); - CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP(); EmitBlock(EHEntry); diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCleanup.h b/contrib/llvm/tools/clang/lib/CodeGen/CGCleanup.h index dd156c6..81c6412 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CGCleanup.h +++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCleanup.h @@ -62,6 +62,9 @@ protected: /// Whether this cleanup is currently active. unsigned IsActive : 1; + /// Whether this cleanup is a lifetime marker + unsigned IsLifetimeMarker : 1; + /// Whether the normal cleanup should test the activation flag. unsigned TestFlagInNormalCleanup : 1; @@ -75,7 +78,7 @@ protected: /// The number of fixups required by enclosing scopes (not including /// this one). If this is the top cleanup scope, all the fixups /// from this index onwards belong to this scope. 
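The PushedTerminate change narrows the terminate scope to Itanium-style targets: the Microsoft C++ runtime already calls terminate when a cleanup throws during unwinding, so an explicit terminate scope there would be redundant. The behavior both ABIs must deliver, in miniature:

    #include <cstdio>

    struct ThrowsOnDestroy {
      ~ThrowsOnDestroy() noexcept(false) { throw 1; }
    };

    int main() {
      try {
        ThrowsOnDestroy T;
        throw 0; // unwinding begins and T's cleanup runs...
      } catch (...) {
        std::puts("never reached");
      }
      // ...the cleanup's own throw ends the program via std::terminate:
      // through the emitted terminate scope on Itanium, through the runtime
      // itself on Microsoft targets.
      return 0;
    }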
- unsigned FixupDepth : 32 - 17 - NumCommonBits; // currently 13 + unsigned FixupDepth : 32 - 18 - NumCommonBits; // currently 12 }; class FilterBitFields { @@ -272,6 +275,7 @@ public: CleanupBits.IsNormalCleanup = isNormal; CleanupBits.IsEHCleanup = isEH; CleanupBits.IsActive = isActive; + CleanupBits.IsLifetimeMarker = false; CleanupBits.TestFlagInNormalCleanup = false; CleanupBits.TestFlagInEHCleanup = false; CleanupBits.CleanupSize = cleanupSize; @@ -284,19 +288,20 @@ public: delete ExtInfo; } // Objects of EHCleanupScope are not destructed. Use Destroy(). - ~EHCleanupScope() LLVM_DELETED_FUNCTION; + ~EHCleanupScope() = delete; bool isNormalCleanup() const { return CleanupBits.IsNormalCleanup; } llvm::BasicBlock *getNormalBlock() const { return NormalBlock; } void setNormalBlock(llvm::BasicBlock *BB) { NormalBlock = BB; } bool isEHCleanup() const { return CleanupBits.IsEHCleanup; } - llvm::BasicBlock *getEHBlock() const { return getCachedEHDispatchBlock(); } - void setEHBlock(llvm::BasicBlock *BB) { setCachedEHDispatchBlock(BB); } bool isActive() const { return CleanupBits.IsActive; } void setActive(bool A) { CleanupBits.IsActive = A; } + bool isLifetimeMarker() const { return CleanupBits.IsLifetimeMarker; } + void setLifetimeMarker() { CleanupBits.IsLifetimeMarker = true; } + llvm::AllocaInst *getActiveFlag() const { return ActiveFlag; } void setActiveFlag(llvm::AllocaInst *Var) { ActiveFlag = Var; } diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.cpp index 978e1bb..48458db 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.cpp +++ b/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.cpp @@ -52,35 +52,48 @@ CGDebugInfo::~CGDebugInfo() { "Region stack mismatch, stack not empty!"); } -ArtificialLocation::ArtificialLocation(CodeGenFunction &CGF) - : ApplyDebugLocation(CGF) { - if (auto *DI = CGF.getDebugInfo()) { - // Construct a location that has a valid scope, but no line info. - assert(!DI->LexicalBlockStack.empty()); - llvm::DIDescriptor Scope(DI->LexicalBlockStack.back()); - CGF.Builder.SetCurrentDebugLocation(llvm::DebugLoc::get(0, 0, Scope)); - } +ApplyDebugLocation::ApplyDebugLocation(CodeGenFunction &CGF, + SourceLocation TemporaryLocation) + : CGF(CGF) { + init(TemporaryLocation); } ApplyDebugLocation::ApplyDebugLocation(CodeGenFunction &CGF, - SourceLocation TemporaryLocation, - bool ForceColumnInfo) + bool DefaultToEmpty, + SourceLocation TemporaryLocation) : CGF(CGF) { + init(TemporaryLocation, DefaultToEmpty); +} + +void ApplyDebugLocation::init(SourceLocation TemporaryLocation, + bool DefaultToEmpty) { if (auto *DI = CGF.getDebugInfo()) { OriginalLocation = CGF.Builder.getCurrentDebugLocation(); - if (TemporaryLocation.isInvalid()) - CGF.Builder.SetCurrentDebugLocation(llvm::DebugLoc()); - else - DI->EmitLocation(CGF.Builder, TemporaryLocation, ForceColumnInfo); + if (TemporaryLocation.isInvalid()) { + if (DefaultToEmpty) + CGF.Builder.SetCurrentDebugLocation(llvm::DebugLoc()); + else { + // Construct a location that has a valid scope, but no line info.
+ assert(!DI->LexicalBlockStack.empty()); + CGF.Builder.SetCurrentDebugLocation( + llvm::DebugLoc::get(0, 0, DI->LexicalBlockStack.back())); + } + } else + DI->EmitLocation(CGF.Builder, TemporaryLocation); } } +ApplyDebugLocation::ApplyDebugLocation(CodeGenFunction &CGF, const Expr *E) + : CGF(CGF) { + init(E->getExprLoc()); +} + ApplyDebugLocation::ApplyDebugLocation(CodeGenFunction &CGF, llvm::DebugLoc Loc) : CGF(CGF) { if (CGF.getDebugInfo()) { OriginalLocation = CGF.Builder.getCurrentDebugLocation(); - if (!Loc.isUnknown()) - CGF.Builder.SetCurrentDebugLocation(Loc); + if (Loc) + CGF.Builder.SetCurrentDebugLocation(std::move(Loc)); } } @@ -88,7 +101,7 @@ ApplyDebugLocation::~ApplyDebugLocation() { // Query CGF so the location isn't overwritten when location updates are // temporarily disabled (for C++ default function arguments) if (CGF.getDebugInfo()) - CGF.Builder.SetCurrentDebugLocation(OriginalLocation); + CGF.Builder.SetCurrentDebugLocation(std::move(OriginalLocation)); } /// ArtificialLocation - An RAII object that temporarily switches to @@ -107,37 +120,33 @@ void CGDebugInfo::setLocation(SourceLocation Loc) { return; SourceManager &SM = CGM.getContext().getSourceManager(); - llvm::DIScope Scope(LexicalBlockStack.back()); + auto *Scope = cast<llvm::DIScope>(LexicalBlockStack.back()); PresumedLoc PCLoc = SM.getPresumedLoc(CurLoc); - if (PCLoc.isInvalid() || Scope.getFilename() == PCLoc.getFilename()) + if (PCLoc.isInvalid() || Scope->getFilename() == PCLoc.getFilename()) return; - if (Scope.isLexicalBlockFile()) { - llvm::DILexicalBlockFile LBF = llvm::DILexicalBlockFile(Scope); - llvm::DIDescriptor D = DBuilder.createLexicalBlockFile( - LBF.getScope(), getOrCreateFile(CurLoc)); - llvm::MDNode *N = D; + if (auto *LBF = dyn_cast<llvm::DILexicalBlockFile>(Scope)) { LexicalBlockStack.pop_back(); - LexicalBlockStack.emplace_back(N); - } else if (Scope.isLexicalBlock() || Scope.isSubprogram()) { - llvm::DIDescriptor D = - DBuilder.createLexicalBlockFile(Scope, getOrCreateFile(CurLoc)); - llvm::MDNode *N = D; + LexicalBlockStack.emplace_back(DBuilder.createLexicalBlockFile( + LBF->getScope(), getOrCreateFile(CurLoc))); + } else if (isa<llvm::DILexicalBlock>(Scope) || + isa<llvm::DISubprogram>(Scope)) { LexicalBlockStack.pop_back(); - LexicalBlockStack.emplace_back(N); + LexicalBlockStack.emplace_back( + DBuilder.createLexicalBlockFile(Scope, getOrCreateFile(CurLoc))); } } /// getContextDescriptor - Get context info for the decl. -llvm::DIScope CGDebugInfo::getContextDescriptor(const Decl *Context) { +llvm::DIScope *CGDebugInfo::getContextDescriptor(const Decl *Context) { if (!Context) return TheCU; auto I = RegionMap.find(Context); if (I != RegionMap.end()) { llvm::Metadata *V = I->second; - return llvm::DIScope(dyn_cast_or_null<llvm::MDNode>(V)); + return dyn_cast_or_null<llvm::DIScope>(V); } // Check namespace. @@ -234,17 +243,17 @@ StringRef CGDebugInfo::getClassName(const RecordDecl *RD) { } /// getOrCreateFile - Get the file debug info descriptor for the input location. -llvm::DIFile CGDebugInfo::getOrCreateFile(SourceLocation Loc) { +llvm::DIFile *CGDebugInfo::getOrCreateFile(SourceLocation Loc) { if (!Loc.isValid()) // If Location is not valid then use main input file. 
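All of the constructors above funnel into init, making ApplyDebugLocation a plain RAII guard: save the builder's current debug location, install a temporary one (possibly the scope-only line-0 location used for compiler-generated code), and restore the original on destruction. Stripped of the clang types, the shape of the pattern is as follows (simplified stand-ins, not the real classes):

    struct DebugLoc {
      unsigned Line = 0, Col = 0; // Line == 0 models an "artificial" location
    };

    struct Builder {
      DebugLoc Cur;
      DebugLoc getCurrentDebugLocation() const { return Cur; }
      void SetCurrentDebugLocation(DebugLoc L) { Cur = L; }
    };

    class ApplyLocationGuard {
      Builder &B;
      DebugLoc Original;

    public:
      ApplyLocationGuard(Builder &B, DebugLoc Temporary)
          : B(B), Original(B.getCurrentDebugLocation()) {
        B.SetCurrentDebugLocation(Temporary);
      }
      ~ApplyLocationGuard() { B.SetCurrentDebugLocation(Original); }
    };

    int main() {
      Builder B;
      B.SetCurrentDebugLocation({42, 7});
      {
        ApplyLocationGuard G(B, {0, 0}); // emit compiler-generated code here
      }
      // The original location {42, 7} is back in force.
      return B.getCurrentDebugLocation().Line == 42 ? 0 : 1;
    }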
- return DBuilder.createFile(TheCU.getFilename(), TheCU.getDirectory()); + return DBuilder.createFile(TheCU->getFilename(), TheCU->getDirectory()); SourceManager &SM = CGM.getContext().getSourceManager(); PresumedLoc PLoc = SM.getPresumedLoc(Loc); if (PLoc.isInvalid() || StringRef(PLoc.getFilename()).empty()) // If the location is not valid then use main input file. - return DBuilder.createFile(TheCU.getFilename(), TheCU.getDirectory()); + return DBuilder.createFile(TheCU->getFilename(), TheCU->getDirectory()); // Cache the results. const char *fname = PLoc.getFilename(); @@ -253,18 +262,19 @@ llvm::DIFile CGDebugInfo::getOrCreateFile(SourceLocation Loc) { if (it != DIFileCache.end()) { // Verify that the information still exists. if (llvm::Metadata *V = it->second) - return llvm::DIFile(cast<llvm::MDNode>(V)); + return cast<llvm::DIFile>(V); } - llvm::DIFile F = DBuilder.createFile(PLoc.getFilename(), getCurrentDirname()); + llvm::DIFile *F = + DBuilder.createFile(PLoc.getFilename(), getCurrentDirname()); DIFileCache[fname].reset(F); return F; } /// getOrCreateMainFile - Get the file info for main compile unit. -llvm::DIFile CGDebugInfo::getOrCreateMainFile() { - return DBuilder.createFile(TheCU.getFilename(), TheCU.getDirectory()); +llvm::DIFile *CGDebugInfo::getOrCreateMainFile() { + return DBuilder.createFile(TheCU->getFilename(), TheCU->getDirectory()); } /// getLineNumber - Get line number for the location. If location is invalid @@ -369,12 +379,13 @@ void CGDebugInfo::CreateCompileUnit() { DebugKind <= CodeGenOptions::DebugLineTablesOnly ? llvm::DIBuilder::LineTablesOnly : llvm::DIBuilder::FullDebug, + 0 /* DWOid */, DebugKind != CodeGenOptions::LocTrackingOnly); } /// CreateType - Get the Basic type from the cache or create a new /// one if necessary. -llvm::DIType CGDebugInfo::CreateType(const BuiltinType *BT) { +llvm::DIType *CGDebugInfo::CreateType(const BuiltinType *BT) { llvm::dwarf::TypeKind Encoding; StringRef BTName; switch (BT->getKind()) { @@ -386,7 +397,7 @@ llvm::DIType CGDebugInfo::CreateType(const BuiltinType *BT) { case BuiltinType::NullPtr: return DBuilder.createNullPtrType(); case BuiltinType::Void: - return llvm::DIType(); + return nullptr; case BuiltinType::ObjCClass: if (!ClassTy) ClassTy = DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type, @@ -409,11 +420,11 @@ llvm::DIType CGDebugInfo::CreateType(const BuiltinType *BT) { unsigned Size = CGM.getContext().getTypeSize(CGM.getContext().VoidPtrTy); - llvm::DIType ISATy = DBuilder.createPointerType(ClassTy, Size); + auto *ISATy = DBuilder.createPointerType(ClassTy, Size); ObjTy = DBuilder.createStructType(TheCU, "objc_object", getOrCreateMainFile(), - 0, 0, 0, 0, llvm::DIType(), llvm::DIArray()); + 0, 0, 0, 0, nullptr, llvm::DINodeArray()); DBuilder.replaceArrays( ObjTy, @@ -510,11 +521,10 @@ llvm::DIType CGDebugInfo::CreateType(const BuiltinType *BT) { // Bit size, align and offset of the type. uint64_t Size = CGM.getContext().getTypeSize(BT); uint64_t Align = CGM.getContext().getTypeAlign(BT); - llvm::DIType DbgTy = DBuilder.createBasicType(BTName, Size, Align, Encoding); - return DbgTy; + return DBuilder.createBasicType(BTName, Size, Align, Encoding); } -llvm::DIType CGDebugInfo::CreateType(const ComplexType *Ty) { +llvm::DIType *CGDebugInfo::CreateType(const ComplexType *Ty) { // Bit size, align and offset of the type. 
llvm::dwarf::TypeKind Encoding = llvm::dwarf::DW_ATE_complex_float; if (Ty->isComplexIntegerType()) @@ -522,15 +532,13 @@ llvm::DIType CGDebugInfo::CreateType(const ComplexType *Ty) { uint64_t Size = CGM.getContext().getTypeSize(Ty); uint64_t Align = CGM.getContext().getTypeAlign(Ty); - llvm::DIType DbgTy = - DBuilder.createBasicType("complex", Size, Align, Encoding); - - return DbgTy; + return DBuilder.createBasicType("complex", Size, Align, Encoding); } /// CreateCVRType - Get the qualified type from the cache or create /// a new one if necessary. -llvm::DIType CGDebugInfo::CreateQualifiedType(QualType Ty, llvm::DIFile Unit) { +llvm::DIType *CGDebugInfo::CreateQualifiedType(QualType Ty, + llvm::DIFile *Unit) { QualifierCollector Qc; const Type *T = Qc.strip(Ty); @@ -556,17 +564,15 @@ llvm::DIType CGDebugInfo::CreateQualifiedType(QualType Ty, llvm::DIFile Unit) { return getOrCreateType(QualType(T, 0), Unit); } - llvm::DIType FromTy = getOrCreateType(Qc.apply(CGM.getContext(), T), Unit); + auto *FromTy = getOrCreateType(Qc.apply(CGM.getContext(), T), Unit); // No need to fill in the Name, Line, Size, Alignment, Offset in case of // CVR derived types. - llvm::DIType DbgTy = DBuilder.createQualifiedType(Tag, FromTy); - - return DbgTy; + return DBuilder.createQualifiedType(Tag, FromTy); } -llvm::DIType CGDebugInfo::CreateType(const ObjCObjectPointerType *Ty, - llvm::DIFile Unit) { +llvm::DIType *CGDebugInfo::CreateType(const ObjCObjectPointerType *Ty, + llvm::DIFile *Unit) { // The frontend treats 'id' as a typedef to an ObjCObjectType, // whereas 'id<protocol>' is treated as an ObjCPointerType. For the @@ -574,12 +580,12 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCObjectPointerType *Ty, if (Ty->isObjCQualifiedIdType()) return getOrCreateType(CGM.getContext().getObjCIdType(), Unit); - llvm::DIType DbgTy = CreatePointerLikeType(llvm::dwarf::DW_TAG_pointer_type, - Ty, Ty->getPointeeType(), Unit); - return DbgTy; + return CreatePointerLikeType(llvm::dwarf::DW_TAG_pointer_type, Ty, + Ty->getPointeeType(), Unit); } -llvm::DIType CGDebugInfo::CreateType(const PointerType *Ty, llvm::DIFile Unit) { +llvm::DIType *CGDebugInfo::CreateType(const PointerType *Ty, + llvm::DIFile *Unit) { return CreatePointerLikeType(llvm::dwarf::DW_TAG_pointer_type, Ty, Ty->getPointeeType(), Unit); } @@ -588,12 +594,12 @@ llvm::DIType CGDebugInfo::CreateType(const PointerType *Ty, llvm::DIFile Unit) { /// on their mangled names, if they're external. static SmallString<256> getUniqueTagTypeName(const TagType *Ty, CodeGenModule &CGM, - llvm::DICompileUnit TheCU) { + llvm::DICompileUnit *TheCU) { SmallString<256> FullName; // FIXME: ODR should apply to ObjC++ exactly the same way it does to C++. // For now, only apply ODR with C++. const TagDecl *TD = Ty->getDecl(); - if (TheCU.getLanguage() != llvm::dwarf::DW_LANG_C_plus_plus || + if (TheCU->getSourceLanguage() != llvm::dwarf::DW_LANG_C_plus_plus || !TD->isExternallyVisible()) return FullName; // Microsoft Mangler does not have support for mangleCXXRTTIName yet. @@ -608,41 +614,56 @@ static SmallString<256> getUniqueTagTypeName(const TagType *Ty, return FullName; } -// Creates a forward declaration for a RecordDecl in the given context.
-llvm::DICompositeType -CGDebugInfo::getOrCreateRecordFwdDecl(const RecordType *Ty, - llvm::DIDescriptor Ctx) { - const RecordDecl *RD = Ty->getDecl(); - if (llvm::DIType T = getTypeOrNull(CGM.getContext().getRecordType(RD))) - return llvm::DICompositeType(T); - llvm::DIFile DefUnit = getOrCreateFile(RD->getLocation()); - unsigned Line = getLineNumber(RD->getLocation()); - StringRef RDName = getClassName(RD); - - llvm::dwarf::Tag Tag; +static llvm::dwarf::Tag getTagForRecord(const RecordDecl *RD) { + llvm::dwarf::Tag Tag; if (RD->isStruct() || RD->isInterface()) Tag = llvm::dwarf::DW_TAG_structure_type; else if (RD->isUnion()) Tag = llvm::dwarf::DW_TAG_union_type; else { + // FIXME: This could be a struct type giving a default visibility different + // than C++ class type, but needs llvm metadata changes first. assert(RD->isClass()); Tag = llvm::dwarf::DW_TAG_class_type; } + return Tag; +} + +// Creates a forward declaration for a RecordDecl in the given context. +llvm::DICompositeType * +CGDebugInfo::getOrCreateRecordFwdDecl(const RecordType *Ty, + llvm::DIScope *Ctx) { + const RecordDecl *RD = Ty->getDecl(); + if (llvm::DIType *T = getTypeOrNull(CGM.getContext().getRecordType(RD))) + return cast<llvm::DICompositeType>(T); + llvm::DIFile *DefUnit = getOrCreateFile(RD->getLocation()); + unsigned Line = getLineNumber(RD->getLocation()); + StringRef RDName = getClassName(RD); + + uint64_t Size = 0; + uint64_t Align = 0; + + const RecordDecl *D = RD->getDefinition(); + if (D && D->isCompleteDefinition()) { + Size = CGM.getContext().getTypeSize(Ty); + Align = CGM.getContext().getTypeAlign(Ty); + } // Create the type. SmallString<256> FullName = getUniqueTagTypeName(Ty, CGM, TheCU); - llvm::DICompositeType RetTy = DBuilder.createReplaceableForwardDecl( - Tag, RDName, Ctx, DefUnit, Line, 0, 0, 0, FullName); + llvm::DICompositeType *RetTy = DBuilder.createReplaceableCompositeType( + getTagForRecord(RD), RDName, Ctx, DefUnit, Line, 0, Size, Align, + llvm::DINode::FlagFwdDecl, FullName); ReplaceMap.emplace_back( std::piecewise_construct, std::make_tuple(Ty), std::make_tuple(static_cast<llvm::Metadata *>(RetTy))); return RetTy; } -llvm::DIType CGDebugInfo::CreatePointerLikeType(llvm::dwarf::Tag Tag, - const Type *Ty, - QualType PointeeTy, - llvm::DIFile Unit) { +llvm::DIType *CGDebugInfo::CreatePointerLikeType(llvm::dwarf::Tag Tag, + const Type *Ty, + QualType PointeeTy, + llvm::DIFile *Unit) { if (Tag == llvm::dwarf::DW_TAG_reference_type || Tag == llvm::dwarf::DW_TAG_rvalue_reference_type) return DBuilder.createReferenceType(Tag, getOrCreateType(PointeeTy, Unit)); @@ -658,8 +679,8 @@ llvm::DIType CGDebugInfo::CreatePointerLikeType(llvm::dwarf::Tag Tag, Align); } -llvm::DIType CGDebugInfo::getOrCreateStructPtrType(StringRef Name, - llvm::DIType &Cache) { +llvm::DIType *CGDebugInfo::getOrCreateStructPtrType(StringRef Name, + llvm::DIType *&Cache) { if (Cache) return Cache; Cache = DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type, Name, @@ -669,18 +690,16 @@ llvm::DIType CGDebugInfo::getOrCreateStructPtrType(StringRef Name, return Cache; } -llvm::DIType CGDebugInfo::CreateType(const BlockPointerType *Ty, - llvm::DIFile Unit) { +llvm::DIType *CGDebugInfo::CreateType(const BlockPointerType *Ty, + llvm::DIFile *Unit) { if (BlockLiteralGeneric) return BlockLiteralGeneric; SmallVector<llvm::Metadata *, 8> EltTys; - llvm::DIType FieldTy; QualType FType; uint64_t FieldSize, FieldOffset; unsigned FieldAlign; - llvm::DIArray Elements; - llvm::DIType EltTy, DescTy; + llvm::DINodeArray Elements; 
FieldOffset = 0; FType = CGM.getContext().UnsignedLongTy; @@ -690,17 +709,17 @@ llvm::DIType CGDebugInfo::CreateType(const BlockPointerType *Ty, Elements = DBuilder.getOrCreateArray(EltTys); EltTys.clear(); - unsigned Flags = llvm::DIDescriptor::FlagAppleBlock; + unsigned Flags = llvm::DINode::FlagAppleBlock; unsigned LineNo = getLineNumber(CurLoc); - EltTy = DBuilder.createStructType(Unit, "__block_descriptor", Unit, LineNo, - FieldOffset, 0, Flags, llvm::DIType(), - Elements); + auto *EltTy = + DBuilder.createStructType(Unit, "__block_descriptor", Unit, LineNo, + FieldOffset, 0, Flags, nullptr, Elements); // Bit size, align and offset of the type. uint64_t Size = CGM.getContext().getTypeSize(Ty); - DescTy = DBuilder.createPointerType(EltTy, Size); + auto *DescTy = DBuilder.createPointerType(EltTy, Size); FieldOffset = 0; FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy); @@ -712,29 +731,27 @@ llvm::DIType CGDebugInfo::CreateType(const BlockPointerType *Ty, EltTys.push_back(CreateMemberType(Unit, FType, "__FuncPtr", &FieldOffset)); FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy); - FieldTy = DescTy; FieldSize = CGM.getContext().getTypeSize(Ty); FieldAlign = CGM.getContext().getTypeAlign(Ty); - FieldTy = - DBuilder.createMemberType(Unit, "__descriptor", Unit, LineNo, FieldSize, - FieldAlign, FieldOffset, 0, FieldTy); - EltTys.push_back(FieldTy); + EltTys.push_back(DBuilder.createMemberType(Unit, "__descriptor", Unit, LineNo, + FieldSize, FieldAlign, FieldOffset, + 0, DescTy)); FieldOffset += FieldSize; Elements = DBuilder.getOrCreateArray(EltTys); - EltTy = DBuilder.createStructType(Unit, "__block_literal_generic", Unit, - LineNo, FieldOffset, 0, Flags, - llvm::DIType(), Elements); + EltTy = + DBuilder.createStructType(Unit, "__block_literal_generic", Unit, LineNo, + FieldOffset, 0, Flags, nullptr, Elements); BlockLiteralGeneric = DBuilder.createPointerType(EltTy, Size); return BlockLiteralGeneric; } -llvm::DIType CGDebugInfo::CreateType(const TemplateSpecializationType *Ty, - llvm::DIFile Unit) { +llvm::DIType *CGDebugInfo::CreateType(const TemplateSpecializationType *Ty, + llvm::DIFile *Unit) { assert(Ty->isTypeAlias()); - llvm::DIType Src = getOrCreateType(Ty->getAliasedType(), Unit); + llvm::DIType *Src = getOrCreateType(Ty->getAliasedType(), Unit); SmallString<128> NS; llvm::raw_svector_ostream OS(NS); @@ -749,35 +766,26 @@ llvm::DIType CGDebugInfo::CreateType(const TemplateSpecializationType *Ty, Ty->getTemplateName().getAsTemplateDecl())->getTemplatedDecl(); SourceLocation Loc = AliasDecl->getLocation(); - llvm::DIFile File = getOrCreateFile(Loc); - unsigned Line = getLineNumber(Loc); - - llvm::DIDescriptor Ctxt = - getContextDescriptor(cast<Decl>(AliasDecl->getDeclContext())); - - return DBuilder.createTypedef(Src, internString(OS.str()), File, Line, Ctxt); + return DBuilder.createTypedef( + Src, internString(OS.str()), getOrCreateFile(Loc), getLineNumber(Loc), + getContextDescriptor(cast<Decl>(AliasDecl->getDeclContext()))); } -llvm::DIType CGDebugInfo::CreateType(const TypedefType *Ty, llvm::DIFile Unit) { - // Typedefs are derived from some other type. If we have a typedef of a - // typedef, make sure to emit the whole chain. - llvm::DIType Src = getOrCreateType(Ty->getDecl()->getUnderlyingType(), Unit); +llvm::DIType *CGDebugInfo::CreateType(const TypedefType *Ty, + llvm::DIFile *Unit) { // We don't set size information, but do specify where the typedef was // declared. 
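For reference, the type that CreateType(const BlockPointerType*) fabricates above is the generic block literal documented in Clang's Blocks ABI; the __FuncPtr and __descriptor members built there are the trailing fields of that layout. Written out as plain structs (the leading isa/flags/reserved fields come from the ABI document, not from this diff):

    struct __block_descriptor {
      unsigned long reserved;
      unsigned long Size; // the two UnsignedLongTy members created above
    };

    struct __block_literal_generic {
      void *__isa;
      int __flags;
      int __reserved;
      void (*__FuncPtr)(void); // the block's invoke function
      struct __block_descriptor *__descriptor;
    };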
SourceLocation Loc = Ty->getDecl()->getLocation(); - llvm::DIFile File = getOrCreateFile(Loc); - unsigned Line = getLineNumber(Loc); - const TypedefNameDecl *TyDecl = Ty->getDecl(); - llvm::DIDescriptor TypedefContext = - getContextDescriptor(cast<Decl>(Ty->getDecl()->getDeclContext())); - - return DBuilder.createTypedef(Src, TyDecl->getName(), File, Line, - TypedefContext); + // Typedefs are derived from some other type. + return DBuilder.createTypedef( + getOrCreateType(Ty->getDecl()->getUnderlyingType(), Unit), + Ty->getDecl()->getName(), getOrCreateFile(Loc), getLineNumber(Loc), + getContextDescriptor(cast<Decl>(Ty->getDecl()->getDeclContext()))); } -llvm::DIType CGDebugInfo::CreateType(const FunctionType *Ty, - llvm::DIFile Unit) { +llvm::DIType *CGDebugInfo::CreateType(const FunctionType *Ty, + llvm::DIFile *Unit) { SmallVector<llvm::Metadata *, 16> EltTys; // Add the result type at least. @@ -794,11 +802,11 @@ llvm::DIType CGDebugInfo::CreateType(const FunctionType *Ty, EltTys.push_back(DBuilder.createUnspecifiedParameter()); } - llvm::DITypeArray EltTypeArray = DBuilder.getOrCreateTypeArray(EltTys); + llvm::DITypeRefArray EltTypeArray = DBuilder.getOrCreateTypeArray(EltTys); return DBuilder.createSubroutineType(Unit, EltTypeArray); } -/// Convert an AccessSpecifier into the corresponding DIDescriptor flag. +/// Convert an AccessSpecifier into the corresponding DINode flag. /// As an optimization, return 0 if the access specifier equals the /// default for the containing type. static unsigned getAccessFlag(AccessSpecifier Access, const RecordDecl *RD) { @@ -813,25 +821,25 @@ static unsigned getAccessFlag(AccessSpecifier Access, const RecordDecl *RD) { switch (Access) { case clang::AS_private: - return llvm::DIDescriptor::FlagPrivate; + return llvm::DINode::FlagPrivate; case clang::AS_protected: - return llvm::DIDescriptor::FlagProtected; + return llvm::DINode::FlagProtected; case clang::AS_public: - return llvm::DIDescriptor::FlagPublic; + return llvm::DINode::FlagPublic; case clang::AS_none: return 0; } llvm_unreachable("unexpected access enumerator"); } -llvm::DIType CGDebugInfo::createFieldType( +llvm::DIType *CGDebugInfo::createFieldType( StringRef name, QualType type, uint64_t sizeInBitsOverride, SourceLocation loc, AccessSpecifier AS, uint64_t offsetInBits, - llvm::DIFile tunit, llvm::DIScope scope, const RecordDecl *RD) { - llvm::DIType debugType = getOrCreateType(type, tunit); + llvm::DIFile *tunit, llvm::DIScope *scope, const RecordDecl *RD) { + llvm::DIType *debugType = getOrCreateType(type, tunit); // Get the location for the field. - llvm::DIFile file = getOrCreateFile(loc); + llvm::DIFile *file = getOrCreateFile(loc); unsigned line = getLineNumber(loc); uint64_t SizeInBits = 0; @@ -853,7 +861,7 @@ llvm::DIType CGDebugInfo::createFieldType( /// CollectRecordLambdaFields - Helper for CollectRecordFields. void CGDebugInfo::CollectRecordLambdaFields( const CXXRecordDecl *CXXDecl, SmallVectorImpl<llvm::Metadata *> &elements, - llvm::DIType RecordTy) { + llvm::DIType *RecordTy) { // For C++11 Lambdas a Field will be the same as a Capture, but the Capture // has the name and the location of the variable so we should iterate over // both concurrently. 
@@ -866,14 +874,14 @@ void CGDebugInfo::CollectRecordLambdaFields( const LambdaCapture &C = *I; if (C.capturesVariable()) { VarDecl *V = C.getCapturedVar(); - llvm::DIFile VUnit = getOrCreateFile(C.getLocation()); + llvm::DIFile *VUnit = getOrCreateFile(C.getLocation()); StringRef VName = V->getName(); uint64_t SizeInBitsOverride = 0; if (Field->isBitField()) { SizeInBitsOverride = Field->getBitWidthValue(CGM.getContext()); assert(SizeInBitsOverride && "found named 0-width bitfield"); } - llvm::DIType fieldType = createFieldType( + llvm::DIType *fieldType = createFieldType( VName, Field->getType(), SizeInBitsOverride, C.getLocation(), Field->getAccess(), layout.getFieldOffset(fieldno), VUnit, RecordTy, CXXDecl); @@ -884,9 +892,9 @@ void CGDebugInfo::CollectRecordLambdaFields( // by using AT_object_pointer for the function and having that be // used as 'this' for semantic references. FieldDecl *f = *Field; - llvm::DIFile VUnit = getOrCreateFile(f->getLocation()); + llvm::DIFile *VUnit = getOrCreateFile(f->getLocation()); QualType type = f->getType(); - llvm::DIType fieldType = createFieldType( + llvm::DIType *fieldType = createFieldType( "this", type, 0, f->getLocation(), f->getAccess(), layout.getFieldOffset(fieldno), VUnit, RecordTy, CXXDecl); @@ -896,14 +904,14 @@ void CGDebugInfo::CollectRecordLambdaFields( } /// Helper for CollectRecordFields. -llvm::DIDerivedType CGDebugInfo::CreateRecordStaticField(const VarDecl *Var, - llvm::DIType RecordTy, - const RecordDecl *RD) { +llvm::DIDerivedType * +CGDebugInfo::CreateRecordStaticField(const VarDecl *Var, llvm::DIType *RecordTy, + const RecordDecl *RD) { // Create the descriptor for the static variable, with or without // constant initializers. Var = Var->getCanonicalDecl(); - llvm::DIFile VUnit = getOrCreateFile(Var->getLocation()); - llvm::DIType VTy = getOrCreateType(Var->getType(), VUnit); + llvm::DIFile *VUnit = getOrCreateFile(Var->getLocation()); + llvm::DIType *VTy = getOrCreateType(Var->getType(), VUnit); unsigned LineNumber = getLineNumber(Var->getLocation()); StringRef VName = Var->getName(); @@ -919,7 +927,7 @@ llvm::DIDerivedType CGDebugInfo::CreateRecordStaticField(const VarDecl *Var, } unsigned Flags = getAccessFlag(Var->getAccess(), RD); - llvm::DIDerivedType GV = DBuilder.createStaticMemberType( + llvm::DIDerivedType *GV = DBuilder.createStaticMemberType( RecordTy, VName, VUnit, LineNumber, VTy, Flags, C); StaticDataMemberCache[Var->getCanonicalDecl()].reset(GV); return GV; @@ -927,8 +935,8 @@ llvm::DIDerivedType CGDebugInfo::CreateRecordStaticField(const VarDecl *Var, /// CollectRecordNormalField - Helper for CollectRecordFields. void CGDebugInfo::CollectRecordNormalField( - const FieldDecl *field, uint64_t OffsetInBits, llvm::DIFile tunit, - SmallVectorImpl<llvm::Metadata *> &elements, llvm::DIType RecordTy, + const FieldDecl *field, uint64_t OffsetInBits, llvm::DIFile *tunit, + SmallVectorImpl<llvm::Metadata *> &elements, llvm::DIType *RecordTy, const RecordDecl *RD) { StringRef name = field->getName(); QualType type = field->getType(); @@ -943,7 +951,7 @@ void CGDebugInfo::CollectRecordNormalField( assert(SizeInBitsOverride && "found named 0-width bitfield"); } - llvm::DIType fieldType = + llvm::DIType *fieldType = createFieldType(name, type, SizeInBitsOverride, field->getLocation(), field->getAccess(), OffsetInBits, tunit, RecordTy, RD); @@ -953,9 +961,9 @@ void CGDebugInfo::CollectRecordNormalField( /// CollectRecordFields - A helper function to collect debug info for /// record fields. 
This is used while creating debug info entry for a Record. void CGDebugInfo::CollectRecordFields( - const RecordDecl *record, llvm::DIFile tunit, + const RecordDecl *record, llvm::DIFile *tunit, SmallVectorImpl<llvm::Metadata *> &elements, - llvm::DICompositeType RecordTy) { + llvm::DICompositeType *RecordTy) { const CXXRecordDecl *CXXDecl = dyn_cast<CXXRecordDecl>(record); if (CXXDecl && CXXDecl->isLambda()) @@ -975,8 +983,7 @@ void CGDebugInfo::CollectRecordFields( if (MI != StaticDataMemberCache.end()) { assert(MI->second && "Static data member declaration should still exist"); - elements.push_back( - llvm::DIDerivedType(cast<llvm::MDNode>(MI->second))); + elements.push_back(cast<llvm::DIDerivedTypeBase>(MI->second)); } else { auto Field = CreateRecordStaticField(V, RecordTy, record); elements.push_back(Field); @@ -994,27 +1001,29 @@ void CGDebugInfo::CollectRecordFields( /// getOrCreateMethodType - CXXMethodDecl's type is a FunctionType. This /// function type is not updated to include implicit "this" pointer. Use this /// routine to get a method type which includes "this" pointer. -llvm::DICompositeType +llvm::DISubroutineType * CGDebugInfo::getOrCreateMethodType(const CXXMethodDecl *Method, - llvm::DIFile Unit) { + llvm::DIFile *Unit) { const FunctionProtoType *Func = Method->getType()->getAs<FunctionProtoType>(); if (Method->isStatic()) - return llvm::DICompositeType(getOrCreateType(QualType(Func, 0), Unit)); + return cast_or_null<llvm::DISubroutineType>( + getOrCreateType(QualType(Func, 0), Unit)); return getOrCreateInstanceMethodType(Method->getThisType(CGM.getContext()), Func, Unit); } -llvm::DICompositeType CGDebugInfo::getOrCreateInstanceMethodType( - QualType ThisPtr, const FunctionProtoType *Func, llvm::DIFile Unit) { +llvm::DISubroutineType *CGDebugInfo::getOrCreateInstanceMethodType( + QualType ThisPtr, const FunctionProtoType *Func, llvm::DIFile *Unit) { // Add "this" pointer. - llvm::DITypeArray Args = llvm::DISubroutineType( - getOrCreateType(QualType(Func, 0), Unit)).getTypeArray(); - assert(Args.getNumElements() && "Invalid number of arguments!"); + llvm::DITypeRefArray Args( + cast<llvm::DISubroutineType>(getOrCreateType(QualType(Func, 0), Unit)) + ->getTypeArray()); + assert(Args.size() && "Invalid number of arguments!"); SmallVector<llvm::Metadata *, 16> Elts; // First element is always return type. For 'void' functions it is NULL. - Elts.push_back(Args.getElement(0)); + Elts.push_back(Args[0]); // "this" pointer is always first argument. 
const CXXRecordDecl *RD = ThisPtr->getPointeeCXXRecordDecl(); @@ -1025,8 +1034,8 @@ llvm::DICompositeType CGDebugInfo::getOrCreateInstanceMethodType( unsigned AS = CGM.getContext().getTargetAddressSpace(PointeeTy); uint64_t Size = CGM.getTarget().getPointerWidth(AS); uint64_t Align = CGM.getContext().getTypeAlign(ThisPtrTy); - llvm::DIType PointeeType = getOrCreateType(PointeeTy, Unit); - llvm::DIType ThisPtrType = + llvm::DIType *PointeeType = getOrCreateType(PointeeTy, Unit); + llvm::DIType *ThisPtrType = DBuilder.createPointerType(PointeeType, Size, Align); TypeCache[ThisPtr.getAsOpaquePtr()].reset(ThisPtrType); // TODO: This and the artificial type below are misleading, the @@ -1035,23 +1044,23 @@ llvm::DICompositeType CGDebugInfo::getOrCreateInstanceMethodType( ThisPtrType = DBuilder.createObjectPointerType(ThisPtrType); Elts.push_back(ThisPtrType); } else { - llvm::DIType ThisPtrType = getOrCreateType(ThisPtr, Unit); + llvm::DIType *ThisPtrType = getOrCreateType(ThisPtr, Unit); TypeCache[ThisPtr.getAsOpaquePtr()].reset(ThisPtrType); ThisPtrType = DBuilder.createObjectPointerType(ThisPtrType); Elts.push_back(ThisPtrType); } // Copy rest of the arguments. - for (unsigned i = 1, e = Args.getNumElements(); i != e; ++i) - Elts.push_back(Args.getElement(i)); + for (unsigned i = 1, e = Args.size(); i != e; ++i) + Elts.push_back(Args[i]); - llvm::DITypeArray EltTypeArray = DBuilder.getOrCreateTypeArray(Elts); + llvm::DITypeRefArray EltTypeArray = DBuilder.getOrCreateTypeArray(Elts); unsigned Flags = 0; if (Func->getExtProtoInfo().RefQualifier == RQ_LValue) - Flags |= llvm::DIDescriptor::FlagLValueReference; + Flags |= llvm::DINode::FlagLValueReference; if (Func->getExtProtoInfo().RefQualifier == RQ_RValue) - Flags |= llvm::DIDescriptor::FlagRValueReference; + Flags |= llvm::DINode::FlagRValueReference; return DBuilder.createSubroutineType(Unit, EltTypeArray, Flags); } @@ -1066,16 +1075,15 @@ static bool isFunctionLocalClass(const CXXRecordDecl *RD) { return false; } -/// CreateCXXMemberFunction - A helper function to create a DISubprogram for +/// CreateCXXMemberFunction - A helper function to create a subprogram for /// a single member function GlobalDecl. -llvm::DISubprogram -CGDebugInfo::CreateCXXMemberFunction(const CXXMethodDecl *Method, - llvm::DIFile Unit, llvm::DIType RecordTy) { +llvm::DISubprogram *CGDebugInfo::CreateCXXMemberFunction( + const CXXMethodDecl *Method, llvm::DIFile *Unit, llvm::DIType *RecordTy) { bool IsCtorOrDtor = isa<CXXConstructorDecl>(Method) || isa<CXXDestructorDecl>(Method); StringRef MethodName = getFunctionName(Method); - llvm::DICompositeType MethodTy = getOrCreateMethodType(Method, Unit); + llvm::DISubroutineType *MethodTy = getOrCreateMethodType(Method, Unit); // Since a single ctor/dtor corresponds to multiple functions, it doesn't // make sense to give a single ctor/dtor a linkage name. @@ -1084,7 +1092,7 @@ CGDebugInfo::CreateCXXMemberFunction(const CXXMethodDecl *Method, MethodLinkageName = CGM.getMangledName(Method); // Get the location for the method. - llvm::DIFile MethodDefUnit; + llvm::DIFile *MethodDefUnit = nullptr; unsigned MethodLine = 0; if (!Method->isImplicit()) { MethodDefUnit = getOrCreateFile(Method->getLocation()); @@ -1092,7 +1100,7 @@ CGDebugInfo::CreateCXXMemberFunction(const CXXMethodDecl *Method, } // Collect virtual method info. 
- llvm::DIType ContainingType; + llvm::DIType *ContainingType = nullptr; unsigned Virtuality = 0; unsigned VIndex = 0; @@ -1115,29 +1123,29 @@ CGDebugInfo::CreateCXXMemberFunction(const CXXMethodDecl *Method, unsigned Flags = 0; if (Method->isImplicit()) - Flags |= llvm::DIDescriptor::FlagArtificial; + Flags |= llvm::DINode::FlagArtificial; Flags |= getAccessFlag(Method->getAccess(), Method->getParent()); if (const CXXConstructorDecl *CXXC = dyn_cast<CXXConstructorDecl>(Method)) { if (CXXC->isExplicit()) - Flags |= llvm::DIDescriptor::FlagExplicit; + Flags |= llvm::DINode::FlagExplicit; } else if (const CXXConversionDecl *CXXC = dyn_cast<CXXConversionDecl>(Method)) { if (CXXC->isExplicit()) - Flags |= llvm::DIDescriptor::FlagExplicit; + Flags |= llvm::DINode::FlagExplicit; } if (Method->hasPrototype()) - Flags |= llvm::DIDescriptor::FlagPrototyped; + Flags |= llvm::DINode::FlagPrototyped; if (Method->getRefQualifier() == RQ_LValue) - Flags |= llvm::DIDescriptor::FlagLValueReference; + Flags |= llvm::DINode::FlagLValueReference; if (Method->getRefQualifier() == RQ_RValue) - Flags |= llvm::DIDescriptor::FlagRValueReference; + Flags |= llvm::DINode::FlagRValueReference; - llvm::DIArray TParamsArray = CollectFunctionTemplateParams(Method, Unit); - llvm::DISubprogram SP = DBuilder.createMethod( + llvm::DINodeArray TParamsArray = CollectFunctionTemplateParams(Method, Unit); + llvm::DISubprogram *SP = DBuilder.createMethod( RecordTy, MethodName, MethodLinkageName, MethodDefUnit, MethodLine, MethodTy, /*isLocalToUnit=*/false, /* isDefinition=*/false, Virtuality, VIndex, ContainingType, Flags, - CGM.getLangOpts().Optimize, nullptr, TParamsArray); + CGM.getLangOpts().Optimize, nullptr, TParamsArray.get()); SPCache[Method->getCanonicalDecl()].reset(SP); @@ -1148,8 +1156,8 @@ CGDebugInfo::CreateCXXMemberFunction(const CXXMethodDecl *Method, /// C++ member functions. This is used while creating debug info entry for /// a Record. void CGDebugInfo::CollectCXXMemberFunctions( - const CXXRecordDecl *RD, llvm::DIFile Unit, - SmallVectorImpl<llvm::Metadata *> &EltTys, llvm::DIType RecordTy) { + const CXXRecordDecl *RD, llvm::DIFile *Unit, + SmallVectorImpl<llvm::Metadata *> &EltTys, llvm::DIType *RecordTy) { // Since we want more than just the individual member decls if we // have templated functions iterate over every declaration to gather @@ -1187,10 +1195,9 @@ void CGDebugInfo::CollectCXXMemberFunctions( /// CollectCXXBases - A helper function to collect debug info for /// C++ base classes. This is used while creating debug info entry for /// a Record. -void CGDebugInfo::CollectCXXBases(const CXXRecordDecl *RD, llvm::DIFile Unit, +void CGDebugInfo::CollectCXXBases(const CXXRecordDecl *RD, llvm::DIFile *Unit, SmallVectorImpl<llvm::Metadata *> &EltTys, - llvm::DIType RecordTy) { - + llvm::DIType *RecordTy) { const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD); for (const auto &BI : RD->bases()) { unsigned BFlags = 0; @@ -1212,24 +1219,24 @@ void CGDebugInfo::CollectCXXBases(const CXXRecordDecl *RD, llvm::DIFile Unit, BaseOffset = 4 * CGM.getMicrosoftVTableContext().getVBTableIndex(RD, Base); } - BFlags = llvm::DIDescriptor::FlagVirtual; + BFlags = llvm::DINode::FlagVirtual; } else BaseOffset = CGM.getContext().toBits(RL.getBaseClassOffset(Base)); // FIXME: Inconsistent units for BaseOffset. It is in bytes when // BI->isVirtual() and bits when not. 
BFlags |= getAccessFlag(BI.getAccessSpecifier(), RD); - llvm::DIType DTy = DBuilder.createInheritance( + llvm::DIType *DTy = DBuilder.createInheritance( RecordTy, getOrCreateType(BI.getType(), Unit), BaseOffset, BFlags); EltTys.push_back(DTy); } } /// CollectTemplateParams - A helper function to collect template parameters. -llvm::DIArray +llvm::DINodeArray CGDebugInfo::CollectTemplateParams(const TemplateParameterList *TPList, ArrayRef<TemplateArgument> TAList, - llvm::DIFile Unit) { + llvm::DIFile *Unit) { SmallVector<llvm::Metadata *, 16> TemplateParams; for (unsigned i = 0, e = TAList.size(); i != e; ++i) { const TemplateArgument &TA = TAList[i]; @@ -1238,23 +1245,20 @@ CGDebugInfo::CollectTemplateParams(const TemplateParameterList *TPList, Name = TPList->getParam(i)->getName(); switch (TA.getKind()) { case TemplateArgument::Type: { - llvm::DIType TTy = getOrCreateType(TA.getAsType(), Unit); - llvm::DITemplateTypeParameter TTP = - DBuilder.createTemplateTypeParameter(TheCU, Name, TTy); - TemplateParams.push_back(TTP); + llvm::DIType *TTy = getOrCreateType(TA.getAsType(), Unit); + TemplateParams.push_back( + DBuilder.createTemplateTypeParameter(TheCU, Name, TTy)); } break; case TemplateArgument::Integral: { - llvm::DIType TTy = getOrCreateType(TA.getIntegralType(), Unit); - llvm::DITemplateValueParameter TVP = - DBuilder.createTemplateValueParameter( - TheCU, Name, TTy, - llvm::ConstantInt::get(CGM.getLLVMContext(), TA.getAsIntegral())); - TemplateParams.push_back(TVP); + llvm::DIType *TTy = getOrCreateType(TA.getIntegralType(), Unit); + TemplateParams.push_back(DBuilder.createTemplateValueParameter( + TheCU, Name, TTy, + llvm::ConstantInt::get(CGM.getLLVMContext(), TA.getAsIntegral()))); } break; case TemplateArgument::Declaration: { const ValueDecl *D = TA.getAsDecl(); QualType T = TA.getParamTypeForDecl().getDesugaredType(CGM.getContext()); - llvm::DIType TTy = getOrCreateType(T, Unit); + llvm::DIType *TTy = getOrCreateType(T, Unit); llvm::Constant *V = nullptr; const CXXMethodDecl *MD; // Variable pointer template parameters have a value that is the address @@ -1278,15 +1282,13 @@ CGDebugInfo::CollectTemplateParams(const TemplateParameterList *TPList, CGM.getContext().toCharUnitsFromBits((int64_t)fieldOffset); V = CGM.getCXXABI().EmitMemberDataPointer(MPT, chars); } - llvm::DITemplateValueParameter TVP = - DBuilder.createTemplateValueParameter( - TheCU, Name, TTy, - cast_or_null<llvm::Constant>(V->stripPointerCasts())); - TemplateParams.push_back(TVP); + TemplateParams.push_back(DBuilder.createTemplateValueParameter( + TheCU, Name, TTy, + cast_or_null<llvm::Constant>(V->stripPointerCasts()))); } break; case TemplateArgument::NullPtr: { QualType T = TA.getNullPtrType(); - llvm::DIType TTy = getOrCreateType(T, Unit); + llvm::DIType *TTy = getOrCreateType(T, Unit); llvm::Constant *V = nullptr; // Special case member data pointer null values since they're actually -1 // instead of zero. 
@@ -1301,24 +1303,19 @@ CGDebugInfo::CollectTemplateParams(const TemplateParameterList *TPList, V = CGM.getCXXABI().EmitNullMemberPointer(MPT); if (!V) V = llvm::ConstantInt::get(CGM.Int8Ty, 0); - llvm::DITemplateValueParameter TVP = - DBuilder.createTemplateValueParameter(TheCU, Name, TTy, - cast<llvm::Constant>(V)); - TemplateParams.push_back(TVP); - } break; - case TemplateArgument::Template: { - llvm::DITemplateValueParameter - TVP = DBuilder.createTemplateTemplateParameter( - TheCU, Name, llvm::DIType(), - TA.getAsTemplate().getAsTemplateDecl()->getQualifiedNameAsString()); - TemplateParams.push_back(TVP); - } break; - case TemplateArgument::Pack: { - llvm::DITemplateValueParameter TVP = DBuilder.createTemplateParameterPack( - TheCU, Name, llvm::DIType(), - CollectTemplateParams(nullptr, TA.getPackAsArray(), Unit)); - TemplateParams.push_back(TVP); + TemplateParams.push_back(DBuilder.createTemplateValueParameter( + TheCU, Name, TTy, cast<llvm::Constant>(V))); } break; + case TemplateArgument::Template: + TemplateParams.push_back(DBuilder.createTemplateTemplateParameter( + TheCU, Name, nullptr, + TA.getAsTemplate().getAsTemplateDecl()->getQualifiedNameAsString())); + break; + case TemplateArgument::Pack: + TemplateParams.push_back(DBuilder.createTemplateParameterPack( + TheCU, Name, nullptr, + CollectTemplateParams(nullptr, TA.getPackAsArray(), Unit))); + break; case TemplateArgument::Expression: { const Expr *E = TA.getAsExpr(); QualType T = E->getType(); @@ -1326,11 +1323,9 @@ CGDebugInfo::CollectTemplateParams(const TemplateParameterList *TPList, T = CGM.getContext().getLValueReferenceType(T); llvm::Constant *V = CGM.EmitConstantExpr(E, T); assert(V && "Expression in template argument isn't constant"); - llvm::DIType TTy = getOrCreateType(T, Unit); - llvm::DITemplateValueParameter TVP = - DBuilder.createTemplateValueParameter( - TheCU, Name, TTy, cast<llvm::Constant>(V->stripPointerCasts())); - TemplateParams.push_back(TVP); + llvm::DIType *TTy = getOrCreateType(T, Unit); + TemplateParams.push_back(DBuilder.createTemplateValueParameter( + TheCU, Name, TTy, cast<llvm::Constant>(V->stripPointerCasts()))); } break; // And the following should never occur: case TemplateArgument::TemplateExpansion: @@ -1344,8 +1339,9 @@ CGDebugInfo::CollectTemplateParams(const TemplateParameterList *TPList, /// CollectFunctionTemplateParams - A helper function to collect debug /// info for function template parameters. -llvm::DIArray CGDebugInfo::CollectFunctionTemplateParams(const FunctionDecl *FD, - llvm::DIFile Unit) { +llvm::DINodeArray +CGDebugInfo::CollectFunctionTemplateParams(const FunctionDecl *FD, + llvm::DIFile *Unit) { if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplateSpecialization) { const TemplateParameterList *TList = FD->getTemplateSpecializationInfo() @@ -1354,13 +1350,13 @@ llvm::DIArray CGDebugInfo::CollectFunctionTemplateParams(const FunctionDecl *FD, return CollectTemplateParams( TList, FD->getTemplateSpecializationArgs()->asArray(), Unit); } - return llvm::DIArray(); + return llvm::DINodeArray(); } /// CollectCXXTemplateParams - A helper function to collect debug info for /// template parameters. -llvm::DIArray CGDebugInfo::CollectCXXTemplateParams( - const ClassTemplateSpecializationDecl *TSpecial, llvm::DIFile Unit) { +llvm::DINodeArray CGDebugInfo::CollectCXXTemplateParams( + const ClassTemplateSpecializationDecl *TSpecial, llvm::DIFile *Unit) { // Always get the full list of parameters, not just the ones from // the specialization. 
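The CollectTemplateParams hunks above drop the named DITemplateTypeParameter/DITemplateValueParameter temporaries and push each DBuilder result straight into the vector, which works because everything is now a Metadata pointer. A simplified model of the collection loop over a tagged argument list (stand-in types, not the Clang AST; nodes are leaked, which is fine for a sketch):

    #include <cstdint>
    #include <string>
    #include <vector>

    struct Node { std::string Desc; };   // stand-in for llvm::DINode

    struct TemplateArg {
      enum Kind { Type, Integral, Pack } K;
      std::string Name;
      int64_t Value = 0;
    };

    // One descriptor node per template argument; packs recurse in real code.
    std::vector<Node *> collectParams(const std::vector<TemplateArg> &Args) {
      std::vector<Node *> Out;
      for (const auto &TA : Args) {
        switch (TA.K) {
        case TemplateArg::Type:
          Out.push_back(new Node{"type " + TA.Name});
          break;
        case TemplateArg::Integral:
          Out.push_back(new Node{TA.Name + " = " + std::to_string(TA.Value)});
          break;
        case TemplateArg::Pack:
          Out.push_back(new Node{"pack " + TA.Name});
          break;
        }
      }
      return Out;
    }

    int main() { return collectParams({{TemplateArg::Type, "T"}}).size() == 1 ? 0 : 1; }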
TemplateParameterList *TPList = @@ -1370,18 +1366,18 @@ llvm::DIArray CGDebugInfo::CollectCXXTemplateParams( } /// getOrCreateVTablePtrType - Return debug info descriptor for vtable. -llvm::DIType CGDebugInfo::getOrCreateVTablePtrType(llvm::DIFile Unit) { - if (VTablePtrType.isValid()) +llvm::DIType *CGDebugInfo::getOrCreateVTablePtrType(llvm::DIFile *Unit) { + if (VTablePtrType) return VTablePtrType; ASTContext &Context = CGM.getContext(); /* Function type */ llvm::Metadata *STy = getOrCreateType(Context.IntTy, Unit); - llvm::DITypeArray SElements = DBuilder.getOrCreateTypeArray(STy); - llvm::DIType SubTy = DBuilder.createSubroutineType(Unit, SElements); + llvm::DITypeRefArray SElements = DBuilder.getOrCreateTypeArray(STy); + llvm::DIType *SubTy = DBuilder.createSubroutineType(Unit, SElements); unsigned Size = Context.getTypeSize(Context.VoidPtrTy); - llvm::DIType vtbl_ptr_type = + llvm::DIType *vtbl_ptr_type = DBuilder.createPointerType(SubTy, Size, 0, "__vtbl_ptr_type"); VTablePtrType = DBuilder.createPointerType(vtbl_ptr_type, Size); return VTablePtrType; @@ -1395,7 +1391,7 @@ StringRef CGDebugInfo::getVTableName(const CXXRecordDecl *RD) { /// CollectVTableInfo - If the C++ class has vtable info then insert appropriate /// debug info entry in EltTys vector. -void CGDebugInfo::CollectVTableInfo(const CXXRecordDecl *RD, llvm::DIFile Unit, +void CGDebugInfo::CollectVTableInfo(const CXXRecordDecl *RD, llvm::DIFile *Unit, SmallVectorImpl<llvm::Metadata *> &EltTys) { const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD); @@ -1408,26 +1404,26 @@ void CGDebugInfo::CollectVTableInfo(const CXXRecordDecl *RD, llvm::DIFile Unit, return; unsigned Size = CGM.getContext().getTypeSize(CGM.getContext().VoidPtrTy); - llvm::DIType VPTR = DBuilder.createMemberType( + llvm::DIType *VPTR = DBuilder.createMemberType( Unit, getVTableName(RD), Unit, 0, Size, 0, 0, - llvm::DIDescriptor::FlagArtificial, getOrCreateVTablePtrType(Unit)); + llvm::DINode::FlagArtificial, getOrCreateVTablePtrType(Unit)); EltTys.push_back(VPTR); } /// getOrCreateRecordType - Emit record type's standalone debug info. -llvm::DIType CGDebugInfo::getOrCreateRecordType(QualType RTy, - SourceLocation Loc) { +llvm::DIType *CGDebugInfo::getOrCreateRecordType(QualType RTy, + SourceLocation Loc) { assert(DebugKind >= CodeGenOptions::LimitedDebugInfo); - llvm::DIType T = getOrCreateType(RTy, getOrCreateFile(Loc)); + llvm::DIType *T = getOrCreateType(RTy, getOrCreateFile(Loc)); return T; } /// getOrCreateInterfaceType - Emit an objective c interface type standalone /// debug info. 
-llvm::DIType CGDebugInfo::getOrCreateInterfaceType(QualType D, - SourceLocation Loc) { +llvm::DIType *CGDebugInfo::getOrCreateInterfaceType(QualType D, + SourceLocation Loc) { assert(DebugKind >= CodeGenOptions::LimitedDebugInfo); - llvm::DIType T = getOrCreateType(D, getOrCreateFile(Loc)); + llvm::DIType *T = getOrCreateType(D, getOrCreateFile(Loc)); RetainedTypes.push_back(D.getAsOpaquePtr()); return T; } @@ -1438,11 +1434,10 @@ void CGDebugInfo::completeType(const EnumDecl *ED) { QualType Ty = CGM.getContext().getEnumType(ED); void *TyPtr = Ty.getAsOpaquePtr(); auto I = TypeCache.find(TyPtr); - if (I == TypeCache.end() || - !llvm::DIType(cast<llvm::MDNode>(I->second)).isForwardDecl()) + if (I == TypeCache.end() || !cast<llvm::DIType>(I->second)->isForwardDecl()) return; - llvm::DIType Res = CreateTypeDefinition(Ty->castAs<EnumType>()); - assert(!Res.isForwardDecl()); + llvm::DIType *Res = CreateTypeDefinition(Ty->castAs<EnumType>()); + assert(!Res->isForwardDecl()); TypeCache[TyPtr].reset(Res); } @@ -1461,8 +1456,8 @@ void CGDebugInfo::completeRequiredType(const RecordDecl *RD) { return; QualType Ty = CGM.getContext().getRecordType(RD); - llvm::DIType T = getTypeOrNull(Ty); - if (T && T.isForwardDecl()) + llvm::DIType *T = getTypeOrNull(Ty); + if (T && T->isForwardDecl()) completeClassData(RD); } @@ -1472,11 +1467,10 @@ void CGDebugInfo::completeClassData(const RecordDecl *RD) { QualType Ty = CGM.getContext().getRecordType(RD); void *TyPtr = Ty.getAsOpaquePtr(); auto I = TypeCache.find(TyPtr); - if (I != TypeCache.end() && - !llvm::DIType(cast<llvm::MDNode>(I->second)).isForwardDecl()) + if (I != TypeCache.end() && !cast<llvm::DIType>(I->second)->isForwardDecl()) return; - llvm::DIType Res = CreateTypeDefinition(Ty->castAs<RecordType>()); - assert(!Res.isForwardDecl()); + llvm::DIType *Res = CreateTypeDefinition(Ty->castAs<RecordType>()); + assert(!Res->isForwardDecl()); TypeCache[TyPtr].reset(Res); } @@ -1524,9 +1518,9 @@ static bool shouldOmitDefinition(CodeGenOptions::DebugInfoKind DebugKind, } /// CreateType - get structure or union type. -llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty) { +llvm::DIType *CGDebugInfo::CreateType(const RecordType *Ty) { RecordDecl *RD = Ty->getDecl(); - llvm::DICompositeType T(getTypeOrNull(QualType(Ty, 0))); + llvm::DIType *T = cast_or_null<llvm::DIType>(getTypeOrNull(QualType(Ty, 0))); if (T || shouldOmitDefinition(DebugKind, RD, CGM.getLangOpts())) { if (!T) T = getOrCreateRecordFwdDecl( @@ -1537,11 +1531,11 @@ llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty) { return CreateTypeDefinition(Ty); } -llvm::DIType CGDebugInfo::CreateTypeDefinition(const RecordType *Ty) { +llvm::DIType *CGDebugInfo::CreateTypeDefinition(const RecordType *Ty) { RecordDecl *RD = Ty->getDecl(); // Get overall information about the record type for the debug info. - llvm::DIFile DefUnit = getOrCreateFile(RD->getLocation()); + llvm::DIFile *DefUnit = getOrCreateFile(RD->getLocation()); // Records and classes and unions can all be recursive. To handle them, we // first generate a debug descriptor for the struct as a forward declaration. @@ -1550,11 +1544,11 @@ llvm::DIType CGDebugInfo::CreateTypeDefinition(const RecordType *Ty) { // may refer to the forward decl if the struct is recursive) and replace all // uses of the forward declaration with the final definition. 
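completeType and completeClassData above share one lazy idiom: probe the cache, return early unless the cached node is only a forward declaration, otherwise build the full definition and overwrite the cache slot. A minimal sketch of that idiom with hypothetical types:

    #include <map>

    struct TypeNode { bool FwdDecl; };

    std::map<void *, TypeNode *> TypeCache;   // keyed by opaque type pointer

    TypeNode *createDefinition(void *Key) { return new TypeNode{false}; }

    void completeType(void *Key) {
      auto I = TypeCache.find(Key);
      if (I == TypeCache.end() || !I->second->FwdDecl)
        return;                                // not cached, or already complete
      TypeCache[Key] = createDefinition(Key);  // replace fwd decl with definition
    }

    int main() {
      int K;
      TypeCache[&K] = new TypeNode{true};
      completeType(&K);
      return TypeCache[&K]->FwdDecl ? 1 : 0;
    }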
- llvm::DICompositeType FwdDecl(getOrCreateLimitedType(Ty, DefUnit)); - assert(FwdDecl.isCompositeType() && - "The debug type of a RecordType should be a llvm::DICompositeType"); + auto *FwdDecl = + cast<llvm::DICompositeType>(getOrCreateLimitedType(Ty, DefUnit)); - if (FwdDecl.isForwardDecl()) + const RecordDecl *D = RD->getDefinition(); + if (!D || !D->isCompleteDefinition()) return FwdDecl; if (const CXXRecordDecl *CXXDecl = dyn_cast<CXXRecordDecl>(RD)) @@ -1586,16 +1580,20 @@ llvm::DIType CGDebugInfo::CreateTypeDefinition(const RecordType *Ty) { LexicalBlockStack.pop_back(); RegionMap.erase(Ty->getDecl()); - llvm::DIArray Elements = DBuilder.getOrCreateArray(EltTys); + llvm::DINodeArray Elements = DBuilder.getOrCreateArray(EltTys); DBuilder.replaceArrays(FwdDecl, Elements); + if (FwdDecl->isTemporary()) + FwdDecl = + llvm::MDNode::replaceWithPermanent(llvm::TempDICompositeType(FwdDecl)); + RegionMap[Ty->getDecl()].reset(FwdDecl); return FwdDecl; } /// CreateType - get objective-c object type. -llvm::DIType CGDebugInfo::CreateType(const ObjCObjectType *Ty, - llvm::DIFile Unit) { +llvm::DIType *CGDebugInfo::CreateType(const ObjCObjectType *Ty, + llvm::DIFile *Unit) { // Ignore protocols. return getOrCreateType(Ty->getBaseType(), Unit); } @@ -1625,22 +1623,23 @@ static bool hasDefaultSetterName(const ObjCPropertyDecl *PD, } /// CreateType - get objective-c interface type. -llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty, - llvm::DIFile Unit) { +llvm::DIType *CGDebugInfo::CreateType(const ObjCInterfaceType *Ty, + llvm::DIFile *Unit) { ObjCInterfaceDecl *ID = Ty->getDecl(); if (!ID) - return llvm::DIType(); + return nullptr; // Get overall information about the record type for the debug info. - llvm::DIFile DefUnit = getOrCreateFile(ID->getLocation()); + llvm::DIFile *DefUnit = getOrCreateFile(ID->getLocation()); unsigned Line = getLineNumber(ID->getLocation()); - llvm::dwarf::SourceLanguage RuntimeLang = TheCU.getLanguage(); + auto RuntimeLang = + static_cast<llvm::dwarf::SourceLanguage>(TheCU->getSourceLanguage()); // If this is just a forward declaration return a special forward-declaration // debug type since we won't be able to lay out the entire type. ObjCInterfaceDecl *Def = ID->getDefinition(); if (!Def || !Def->getImplementation()) { - llvm::DIType FwdDecl = DBuilder.createReplaceableForwardDecl( + llvm::DIType *FwdDecl = DBuilder.createReplaceableCompositeType( llvm::dwarf::DW_TAG_structure_type, ID->getName(), TheCU, DefUnit, Line, RuntimeLang); ObjCInterfaceCache.push_back(ObjCInterfaceCacheEntry(Ty, FwdDecl, Unit)); @@ -1650,12 +1649,12 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty, return CreateTypeDefinition(Ty, Unit); } -llvm::DIType CGDebugInfo::CreateTypeDefinition(const ObjCInterfaceType *Ty, - llvm::DIFile Unit) { +llvm::DIType *CGDebugInfo::CreateTypeDefinition(const ObjCInterfaceType *Ty, + llvm::DIFile *Unit) { ObjCInterfaceDecl *ID = Ty->getDecl(); - llvm::DIFile DefUnit = getOrCreateFile(ID->getLocation()); + llvm::DIFile *DefUnit = getOrCreateFile(ID->getLocation()); unsigned Line = getLineNumber(ID->getLocation()); - unsigned RuntimeLang = TheCU.getLanguage(); + unsigned RuntimeLang = TheCU->getSourceLanguage(); // Bit size, align and offset of the type. 
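The new isTemporary()/replaceWithPermanent() dance above is how recursive records are handled: a temporary placeholder node is created first, the member list (which may point back at the placeholder) is attached, and only then is the placeholder promoted to a uniqued permanent node. A toy illustration of the idea; the stand-in below just flips a flag, whereas the real API destroys the temporary and rewrites every use:

    #include <cstdio>
    #include <vector>

    struct Composite {
      bool Temporary = true;
      std::vector<const char *> Elements;
    };

    Composite *replaceWithPermanent(Composite *C) {
      C->Temporary = false;   // real code RAUWs to a uniqued node here
      return C;
    }

    int main() {
      auto *Fwd = new Composite;                 // forward decl, temporary
      Fwd->Elements = {"field_a", "field_b"};    // members may refer to Fwd
      if (Fwd->Temporary)
        Fwd = replaceWithPermanent(Fwd);
      std::printf("temporary=%d elems=%zu\n", Fwd->Temporary,
                  Fwd->Elements.size());
      delete Fwd;
    }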
uint64_t Size = CGM.getContext().getTypeSize(Ty); @@ -1663,17 +1662,17 @@ llvm::DIType CGDebugInfo::CreateTypeDefinition(const ObjCInterfaceType *Ty, unsigned Flags = 0; if (ID->getImplementation()) - Flags |= llvm::DIDescriptor::FlagObjcClassComplete; + Flags |= llvm::DINode::FlagObjcClassComplete; - llvm::DICompositeType RealDecl = DBuilder.createStructType( - Unit, ID->getName(), DefUnit, Line, Size, Align, Flags, llvm::DIType(), - llvm::DIArray(), RuntimeLang); + llvm::DICompositeType *RealDecl = DBuilder.createStructType( + Unit, ID->getName(), DefUnit, Line, Size, Align, Flags, nullptr, + llvm::DINodeArray(), RuntimeLang); QualType QTy(Ty, 0); TypeCache[QTy.getAsOpaquePtr()].reset(RealDecl); // Push the struct on region stack. - LexicalBlockStack.emplace_back(static_cast<llvm::MDNode *>(RealDecl)); + LexicalBlockStack.emplace_back(RealDecl); RegionMap[Ty->getDecl()].reset(RealDecl); // Convert all the elements. @@ -1681,19 +1680,19 @@ llvm::DIType CGDebugInfo::CreateTypeDefinition(const ObjCInterfaceType *Ty, ObjCInterfaceDecl *SClass = ID->getSuperClass(); if (SClass) { - llvm::DIType SClassTy = + llvm::DIType *SClassTy = getOrCreateType(CGM.getContext().getObjCInterfaceType(SClass), Unit); - if (!SClassTy.isValid()) - return llvm::DIType(); + if (!SClassTy) + return nullptr; - llvm::DIType InhTag = DBuilder.createInheritance(RealDecl, SClassTy, 0, 0); + llvm::DIType *InhTag = DBuilder.createInheritance(RealDecl, SClassTy, 0, 0); EltTys.push_back(InhTag); } // Create entries for all of the properties. for (const auto *PD : ID->properties()) { SourceLocation Loc = PD->getLocation(); - llvm::DIFile PUnit = getOrCreateFile(Loc); + llvm::DIFile *PUnit = getOrCreateFile(Loc); unsigned PLine = getLineNumber(Loc); ObjCMethodDecl *Getter = PD->getGetterMethodDecl(); ObjCMethodDecl *Setter = PD->getSetterMethodDecl(); @@ -1711,9 +1710,9 @@ llvm::DIType CGDebugInfo::CreateTypeDefinition(const ObjCInterfaceType *Ty, unsigned FieldNo = 0; for (ObjCIvarDecl *Field = ID->all_declared_ivar_begin(); Field; Field = Field->getNextIvar(), ++FieldNo) { - llvm::DIType FieldTy = getOrCreateType(Field->getType(), Unit); - if (!FieldTy.isValid()) - return llvm::DIType(); + llvm::DIType *FieldTy = getOrCreateType(Field->getType(), Unit); + if (!FieldTy) + return nullptr; StringRef FieldName = Field->getName(); @@ -1722,7 +1721,7 @@ llvm::DIType CGDebugInfo::CreateTypeDefinition(const ObjCInterfaceType *Ty, continue; // Get the location for the field. 
- llvm::DIFile FieldDefUnit = getOrCreateFile(Field->getLocation()); + llvm::DIFile *FieldDefUnit = getOrCreateFile(Field->getLocation()); unsigned FieldLine = getLineNumber(Field->getLocation()); QualType FType = Field->getType(); uint64_t FieldSize = 0; @@ -1755,11 +1754,11 @@ llvm::DIType CGDebugInfo::CreateTypeDefinition(const ObjCInterfaceType *Ty, unsigned Flags = 0; if (Field->getAccessControl() == ObjCIvarDecl::Protected) - Flags = llvm::DIDescriptor::FlagProtected; + Flags = llvm::DINode::FlagProtected; else if (Field->getAccessControl() == ObjCIvarDecl::Private) - Flags = llvm::DIDescriptor::FlagPrivate; + Flags = llvm::DINode::FlagPrivate; else if (Field->getAccessControl() == ObjCIvarDecl::Public) - Flags = llvm::DIDescriptor::FlagPublic; + Flags = llvm::DINode::FlagPublic; llvm::MDNode *PropertyNode = nullptr; if (ObjCImplementationDecl *ImpD = ID->getImplementation()) { @@ -1767,7 +1766,7 @@ llvm::DIType CGDebugInfo::CreateTypeDefinition(const ObjCInterfaceType *Ty, ImpD->FindPropertyImplIvarDecl(Field->getIdentifier())) { if (ObjCPropertyDecl *PD = PImpD->getPropertyDecl()) { SourceLocation Loc = PD->getLocation(); - llvm::DIFile PUnit = getOrCreateFile(Loc); + llvm::DIFile *PUnit = getOrCreateFile(Loc); unsigned PLine = getLineNumber(Loc); ObjCMethodDecl *Getter = PD->getGetterMethodDecl(); ObjCMethodDecl *Setter = PD->getSetterMethodDecl(); @@ -1788,15 +1787,16 @@ llvm::DIType CGDebugInfo::CreateTypeDefinition(const ObjCInterfaceType *Ty, EltTys.push_back(FieldTy); } - llvm::DIArray Elements = DBuilder.getOrCreateArray(EltTys); + llvm::DINodeArray Elements = DBuilder.getOrCreateArray(EltTys); DBuilder.replaceArrays(RealDecl, Elements); LexicalBlockStack.pop_back(); return RealDecl; } -llvm::DIType CGDebugInfo::CreateType(const VectorType *Ty, llvm::DIFile Unit) { - llvm::DIType ElementTy = getOrCreateType(Ty->getElementType(), Unit); +llvm::DIType *CGDebugInfo::CreateType(const VectorType *Ty, + llvm::DIFile *Unit) { + llvm::DIType *ElementTy = getOrCreateType(Ty->getElementType(), Unit); int64_t Count = Ty->getNumElements(); if (Count == 0) // If number of elements are not known then this is an unbounded array. 
@@ -1804,7 +1804,7 @@ llvm::DIType CGDebugInfo::CreateType(const VectorType *Ty, llvm::DIFile Unit) { Count = -1; llvm::Metadata *Subscript = DBuilder.getOrCreateSubrange(0, Count); - llvm::DIArray SubscriptArray = DBuilder.getOrCreateArray(Subscript); + llvm::DINodeArray SubscriptArray = DBuilder.getOrCreateArray(Subscript); uint64_t Size = CGM.getContext().getTypeSize(Ty); uint64_t Align = CGM.getContext().getTypeAlign(Ty); @@ -1812,7 +1812,7 @@ llvm::DIType CGDebugInfo::CreateType(const VectorType *Ty, llvm::DIFile Unit) { return DBuilder.createVectorType(Size, Align, ElementTy, SubscriptArray); } -llvm::DIType CGDebugInfo::CreateType(const ArrayType *Ty, llvm::DIFile Unit) { +llvm::DIType *CGDebugInfo::CreateType(const ArrayType *Ty, llvm::DIFile *Unit) { uint64_t Size; uint64_t Align; @@ -1858,32 +1858,33 @@ llvm::DIType CGDebugInfo::CreateType(const ArrayType *Ty, llvm::DIFile Unit) { EltTy = Ty->getElementType(); } - llvm::DIArray SubscriptArray = DBuilder.getOrCreateArray(Subscripts); + llvm::DINodeArray SubscriptArray = DBuilder.getOrCreateArray(Subscripts); - llvm::DIType DbgTy = DBuilder.createArrayType( - Size, Align, getOrCreateType(EltTy, Unit), SubscriptArray); - return DbgTy; + return DBuilder.createArrayType(Size, Align, getOrCreateType(EltTy, Unit), + SubscriptArray); } -llvm::DIType CGDebugInfo::CreateType(const LValueReferenceType *Ty, - llvm::DIFile Unit) { +llvm::DIType *CGDebugInfo::CreateType(const LValueReferenceType *Ty, + llvm::DIFile *Unit) { return CreatePointerLikeType(llvm::dwarf::DW_TAG_reference_type, Ty, Ty->getPointeeType(), Unit); } -llvm::DIType CGDebugInfo::CreateType(const RValueReferenceType *Ty, - llvm::DIFile Unit) { +llvm::DIType *CGDebugInfo::CreateType(const RValueReferenceType *Ty, + llvm::DIFile *Unit) { return CreatePointerLikeType(llvm::dwarf::DW_TAG_rvalue_reference_type, Ty, Ty->getPointeeType(), Unit); } -llvm::DIType CGDebugInfo::CreateType(const MemberPointerType *Ty, - llvm::DIFile U) { - llvm::DIType ClassType = getOrCreateType(QualType(Ty->getClass(), 0), U); - if (!Ty->getPointeeType()->isFunctionType()) +llvm::DIType *CGDebugInfo::CreateType(const MemberPointerType *Ty, + llvm::DIFile *U) { + uint64_t Size = CGM.getCXXABI().isTypeInfoCalculable(QualType(Ty, 0)) + ? CGM.getContext().getTypeSize(Ty) + : 0; + llvm::DIType *ClassType = getOrCreateType(QualType(Ty->getClass(), 0), U); + if (Ty->isMemberDataPointerType()) return DBuilder.createMemberPointerType( - getOrCreateType(Ty->getPointeeType(), U), ClassType, - CGM.getContext().getTypeSize(Ty)); + getOrCreateType(Ty->getPointeeType(), U), ClassType, Size); const FunctionProtoType *FPT = Ty->getPointeeType()->getAs<FunctionProtoType>(); @@ -1891,17 +1892,17 @@ llvm::DIType CGDebugInfo::CreateType(const MemberPointerType *Ty, getOrCreateInstanceMethodType(CGM.getContext().getPointerType(QualType( Ty->getClass(), FPT->getTypeQuals())), FPT, U), - ClassType, CGM.getContext().getTypeSize(Ty)); + ClassType, Size); } -llvm::DIType CGDebugInfo::CreateType(const AtomicType *Ty, llvm::DIFile U) { +llvm::DIType *CGDebugInfo::CreateType(const AtomicType *Ty, llvm::DIFile *U) { // Ignore the atomic wrapping // FIXME: What is the correct representation? return getOrCreateType(Ty->getValueType(), U); } /// CreateEnumType - get enumeration type. 
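The new Size computation in the MemberPointerType hunk above guards against incomplete classes: under the Microsoft ABI a member pointer's width depends on the class's inheritance model, so while type info is not yet calculable the code reports 0 rather than guessing. A hedged sketch of that guard:

    #include <cstdint>

    struct MemberPtrTy { bool ClassComplete; uint64_t BitWidth; };

    // Only report a size once the ABI can actually compute one.
    uint64_t memberPointerSizeInBits(const MemberPtrTy &T) {
      return T.ClassComplete ? T.BitWidth : 0;   // 0 = "unknown for now"
    }

    int main() {
      MemberPtrTy Incomplete{false, 0}, Complete{true, 128};
      return memberPointerSizeInBits(Incomplete) == 0 &&
                     memberPointerSizeInBits(Complete) == 128
                 ? 0 : 1;
    }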
-llvm::DIType CGDebugInfo::CreateEnumType(const EnumType *Ty) { +llvm::DIType *CGDebugInfo::CreateEnumType(const EnumType *Ty) { const EnumDecl *ED = Ty->getDecl(); uint64_t Size = 0; uint64_t Align = 0; @@ -1915,14 +1916,14 @@ llvm::DIType CGDebugInfo::CreateEnumType(const EnumType *Ty) { // If this is just a forward declaration, construct an appropriately // marked node and just return it. if (!ED->getDefinition()) { - llvm::DIDescriptor EDContext; - EDContext = getContextDescriptor(cast<Decl>(ED->getDeclContext())); - llvm::DIFile DefUnit = getOrCreateFile(ED->getLocation()); + llvm::DIScope *EDContext = + getContextDescriptor(cast<Decl>(ED->getDeclContext())); + llvm::DIFile *DefUnit = getOrCreateFile(ED->getLocation()); unsigned Line = getLineNumber(ED->getLocation()); StringRef EDName = ED->getName(); - llvm::DIType RetTy = DBuilder.createReplaceableForwardDecl( + llvm::DIType *RetTy = DBuilder.createReplaceableCompositeType( llvm::dwarf::DW_TAG_enumeration_type, EDName, EDContext, DefUnit, Line, - 0, Size, Align, FullName); + 0, Size, Align, llvm::DINode::FlagFwdDecl, FullName); ReplaceMap.emplace_back( std::piecewise_construct, std::make_tuple(Ty), std::make_tuple(static_cast<llvm::Metadata *>(RetTy))); @@ -1932,7 +1933,7 @@ llvm::DIType CGDebugInfo::CreateEnumType(const EnumType *Ty) { return CreateTypeDefinition(Ty); } -llvm::DIType CGDebugInfo::CreateTypeDefinition(const EnumType *Ty) { +llvm::DIType *CGDebugInfo::CreateTypeDefinition(const EnumType *Ty) { const EnumDecl *ED = Ty->getDecl(); uint64_t Size = 0; uint64_t Align = 0; @@ -1943,7 +1944,7 @@ llvm::DIType CGDebugInfo::CreateTypeDefinition(const EnumType *Ty) { SmallString<256> FullName = getUniqueTagTypeName(Ty, CGM, TheCU); - // Create DIEnumerator elements for each enumerator. + // Create elements for each enumerator. SmallVector<llvm::Metadata *, 16> Enumerators; ED = ED->getDefinition(); for (const auto *Enum : ED->enumerators()) { @@ -1952,19 +1953,17 @@ llvm::DIType CGDebugInfo::CreateTypeDefinition(const EnumType *Ty) { } // Return a CompositeType for the enum itself. - llvm::DIArray EltArray = DBuilder.getOrCreateArray(Enumerators); + llvm::DINodeArray EltArray = DBuilder.getOrCreateArray(Enumerators); - llvm::DIFile DefUnit = getOrCreateFile(ED->getLocation()); + llvm::DIFile *DefUnit = getOrCreateFile(ED->getLocation()); unsigned Line = getLineNumber(ED->getLocation()); - llvm::DIDescriptor EnumContext = + llvm::DIScope *EnumContext = getContextDescriptor(cast<Decl>(ED->getDeclContext())); - llvm::DIType ClassTy = ED->isFixed() - ? getOrCreateType(ED->getIntegerType(), DefUnit) - : llvm::DIType(); - llvm::DIType DbgTy = - DBuilder.createEnumerationType(EnumContext, ED->getName(), DefUnit, Line, - Size, Align, EltArray, ClassTy, FullName); - return DbgTy; + llvm::DIType *ClassTy = + ED->isFixed() ? getOrCreateType(ED->getIntegerType(), DefUnit) : nullptr; + return DBuilder.createEnumerationType(EnumContext, ED->getName(), DefUnit, + Line, Size, Align, EltArray, ClassTy, + FullName); } static QualType UnwrapTypeForDebugInfo(QualType T, const ASTContext &C) { @@ -2024,7 +2023,7 @@ static QualType UnwrapTypeForDebugInfo(QualType T, const ASTContext &C) { /// getType - Get the type from the cache or return null type if it doesn't /// exist. -llvm::DIType CGDebugInfo::getTypeOrNull(QualType Ty) { +llvm::DIType *CGDebugInfo::getTypeOrNull(QualType Ty) { // Unwrap the type as needed for debug information. 
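For an enum without a definition, the code above now emits a replaceable composite carrying an explicit DINode::FlagFwdDecl and parks it in ReplaceMap so a later definition can be patched in. A toy model of that deferred-resolution map (stand-in types; the real code RAUWs metadata nodes rather than copying):

    #include <utility>
    #include <vector>

    struct Node { const char *Name; bool Fwd; };

    std::vector<std::pair<const void *, Node *>> ReplaceMap;

    Node *forwardDecl(const void *Key, const char *Name) {
      auto *N = new Node{Name, true};
      ReplaceMap.emplace_back(Key, N);   // may be replaced later
      return N;
    }

    // Called once the real definition exists.
    void resolve(const void *Key, Node *Def) {
      for (auto &Entry : ReplaceMap)
        if (Entry.first == Key)
          *Entry.second = *Def;
    }

    int main() {
      int K;
      Node *N = forwardDecl(&K, "E");
      Node Def{"E", false};
      resolve(&K, &Def);
      return N->Fwd ? 1 : 0;
    }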
Ty = UnwrapTypeForDebugInfo(Ty, CGM.getContext()); @@ -2033,10 +2032,10 @@ llvm::DIType CGDebugInfo::getTypeOrNull(QualType Ty) { if (it != TypeCache.end()) { // Verify that the debug info still exists. if (llvm::Metadata *V = it->second) - return llvm::DIType(cast<llvm::MDNode>(V)); + return cast<llvm::DIType>(V); } - return llvm::DIType(); + return nullptr; } void CGDebugInfo::completeTemplateDefinition( @@ -2052,18 +2051,18 @@ void CGDebugInfo::completeTemplateDefinition( /// getOrCreateType - Get the type from the cache or create a new /// one if necessary. -llvm::DIType CGDebugInfo::getOrCreateType(QualType Ty, llvm::DIFile Unit) { +llvm::DIType *CGDebugInfo::getOrCreateType(QualType Ty, llvm::DIFile *Unit) { if (Ty.isNull()) - return llvm::DIType(); + return nullptr; // Unwrap the type as needed for debug information. Ty = UnwrapTypeForDebugInfo(Ty, CGM.getContext()); - if (llvm::DIType T = getTypeOrNull(Ty)) + if (auto *T = getTypeOrNull(Ty)) return T; // Otherwise create the type. - llvm::DIType Res = CreateTypeNode(Ty, Unit); + llvm::DIType *Res = CreateTypeNode(Ty, Unit); void *TyPtr = Ty.getAsOpaquePtr(); // And update the type cache. @@ -2099,7 +2098,7 @@ ObjCInterfaceDecl *CGDebugInfo::getObjCInterfaceDecl(QualType Ty) { } /// CreateTypeNode - Create a new debug type node. -llvm::DIType CGDebugInfo::CreateTypeNode(QualType Ty, llvm::DIFile Unit) { +llvm::DIType *CGDebugInfo::CreateTypeNode(QualType Ty, llvm::DIFile *Unit) { // Handle qualifiers, which recursively handles what they refer to. if (Ty.hasLocalQualifiers()) return CreateQualifiedType(Ty, Unit); @@ -2181,25 +2180,25 @@ llvm::DIType CGDebugInfo::CreateTypeNode(QualType Ty, llvm::DIFile Unit) { /// getOrCreateLimitedType - Get the type from the cache or create a new /// limited type if necessary. -llvm::DIType CGDebugInfo::getOrCreateLimitedType(const RecordType *Ty, - llvm::DIFile Unit) { +llvm::DIType *CGDebugInfo::getOrCreateLimitedType(const RecordType *Ty, + llvm::DIFile *Unit) { QualType QTy(Ty, 0); - llvm::DICompositeType T(getTypeOrNull(QTy)); + auto *T = cast_or_null<llvm::DICompositeTypeBase>(getTypeOrNull(QTy)); // We may have cached a forward decl when we could have created // a non-forward decl. Go ahead and create a non-forward decl // now. - if (T && !T.isForwardDecl()) + if (T && !T->isForwardDecl()) return T; // Otherwise create the type. - llvm::DICompositeType Res = CreateLimitedType(Ty); + llvm::DICompositeType *Res = CreateLimitedType(Ty); // Propagate members from the declaration to the definition // CreateType(const RecordType*) will overwrite this with the members in the // correct order if the full type is needed. - DBuilder.replaceArrays(Res, T.getElements()); + DBuilder.replaceArrays(Res, T ? T->getElements() : llvm::DINodeArray()); // And update the type cache. TypeCache[QTy.getAsOpaquePtr()].reset(Res); @@ -2207,21 +2206,22 @@ llvm::DIType CGDebugInfo::getOrCreateLimitedType(const RecordType *Ty, } // TODO: Currently used for context chains when limiting debug info. -llvm::DICompositeType CGDebugInfo::CreateLimitedType(const RecordType *Ty) { +llvm::DICompositeType *CGDebugInfo::CreateLimitedType(const RecordType *Ty) { RecordDecl *RD = Ty->getDecl(); // Get overall information about the record type for the debug info. 
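getOrCreateType above is plain memoization: desugar the type, probe TypeCache, build a node only on a miss, then record the result. A minimal sketch, with std::map standing in for the real map of tracking references:

    #include <map>

    struct DITy {};
    std::map<const void *, DITy *> TypeCache;

    DITy *createTypeNode(const void *Key) { return new DITy; }

    DITy *getOrCreateType(const void *Key) {
      if (!Key)
        return nullptr;                  // null QualType -> no debug type
      auto It = TypeCache.find(Key);
      if (It != TypeCache.end())
        return It->second;               // cache hit
      DITy *Res = createTypeNode(Key);   // build, then memoize
      TypeCache[Key] = Res;
      return Res;
    }

    int main() {
      int K;
      return getOrCreateType(&K) == getOrCreateType(&K) ? 0 : 1;
    }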
- llvm::DIFile DefUnit = getOrCreateFile(RD->getLocation()); + llvm::DIFile *DefUnit = getOrCreateFile(RD->getLocation()); unsigned Line = getLineNumber(RD->getLocation()); StringRef RDName = getClassName(RD); - llvm::DIDescriptor RDContext = + llvm::DIScope *RDContext = getContextDescriptor(cast<Decl>(RD->getDeclContext())); // If we ended up creating the type during the context chain construction, // just return that. - llvm::DICompositeType T(getTypeOrNull(CGM.getContext().getRecordType(RD))); - if (T && (!T.isForwardDecl() || !RD->getDefinition())) + auto *T = cast_or_null<llvm::DICompositeType>( + getTypeOrNull(CGM.getContext().getRecordType(RD))); + if (T && (!T->isForwardDecl() || !RD->getDefinition())) return T; // If this is just a forward or incomplete declaration, construct an @@ -2232,38 +2232,27 @@ llvm::DICompositeType CGDebugInfo::CreateLimitedType(const RecordType *Ty) { uint64_t Size = CGM.getContext().getTypeSize(Ty); uint64_t Align = CGM.getContext().getTypeAlign(Ty); - llvm::DICompositeType RealDecl; SmallString<256> FullName = getUniqueTagTypeName(Ty, CGM, TheCU); - if (RD->isUnion()) - RealDecl = DBuilder.createUnionType(RDContext, RDName, DefUnit, Line, Size, - Align, 0, llvm::DIArray(), 0, FullName); - else if (RD->isClass()) { - // FIXME: This could be a struct type giving a default visibility different - // than C++ class type, but needs llvm metadata changes first. - RealDecl = DBuilder.createClassType( - RDContext, RDName, DefUnit, Line, Size, Align, 0, 0, llvm::DIType(), - llvm::DIArray(), llvm::DIType(), llvm::DIArray(), FullName); - } else - RealDecl = DBuilder.createStructType( - RDContext, RDName, DefUnit, Line, Size, Align, 0, llvm::DIType(), - llvm::DIArray(), 0, llvm::DIType(), FullName); + llvm::DICompositeType *RealDecl = DBuilder.createReplaceableCompositeType( + getTagForRecord(RD), RDName, RDContext, DefUnit, Line, 0, Size, Align, 0, + FullName); RegionMap[Ty->getDecl()].reset(RealDecl); TypeCache[QualType(Ty, 0).getAsOpaquePtr()].reset(RealDecl); if (const ClassTemplateSpecializationDecl *TSpecial = dyn_cast<ClassTemplateSpecializationDecl>(RD)) - DBuilder.replaceArrays(RealDecl, llvm::DIArray(), + DBuilder.replaceArrays(RealDecl, llvm::DINodeArray(), CollectCXXTemplateParams(TSpecial, DefUnit)); return RealDecl; } void CGDebugInfo::CollectContainingType(const CXXRecordDecl *RD, - llvm::DICompositeType RealDecl) { + llvm::DICompositeType *RealDecl) { // A class's primary base or the class itself contains the vtable. - llvm::DICompositeType ContainingType; + llvm::DICompositeType *ContainingType = nullptr; const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD); if (const CXXRecordDecl *PBase = RL.getPrimaryBase()) { // Seek non-virtual primary base root. @@ -2275,7 +2264,7 @@ void CGDebugInfo::CollectContainingType(const CXXRecordDecl *RD, else break; } - ContainingType = llvm::DICompositeType( + ContainingType = cast<llvm::DICompositeType>( getOrCreateType(QualType(PBase->getTypeForDecl(), 0), getOrCreateFile(RD->getLocation()))); } else if (RD->isDynamicClass()) @@ -2285,29 +2274,29 @@ void CGDebugInfo::CollectContainingType(const CXXRecordDecl *RD, } /// CreateMemberType - Create new member and increase Offset by FType's size. 
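In the CreateLimitedType hunk above, the old three-way branch over createUnionType/createClassType/createStructType collapses into one replaceable composite keyed by getTagForRecord. Presumably that helper is a straightforward tag switch; a sketch using the real DWARF tag values with a stand-in record type:

    #include <cstdio>

    // Real DWARF tag values from the DWARF specification.
    enum DwarfTag : unsigned {
      DW_TAG_class_type     = 0x02,
      DW_TAG_structure_type = 0x13,
      DW_TAG_union_type     = 0x17,
    };

    struct Record { bool IsUnion, IsClass; };

    unsigned tagForRecord(const Record &RD) {
      if (RD.IsUnion) return DW_TAG_union_type;
      if (RD.IsClass) return DW_TAG_class_type;
      return DW_TAG_structure_type;
    }

    int main() { std::printf("0x%x\n", tagForRecord({false, true})); }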
-llvm::DIType CGDebugInfo::CreateMemberType(llvm::DIFile Unit, QualType FType, - StringRef Name, uint64_t *Offset) { - llvm::DIType FieldTy = CGDebugInfo::getOrCreateType(FType, Unit); +llvm::DIType *CGDebugInfo::CreateMemberType(llvm::DIFile *Unit, QualType FType, + StringRef Name, uint64_t *Offset) { + llvm::DIType *FieldTy = CGDebugInfo::getOrCreateType(FType, Unit); uint64_t FieldSize = CGM.getContext().getTypeSize(FType); unsigned FieldAlign = CGM.getContext().getTypeAlign(FType); - llvm::DIType Ty = DBuilder.createMemberType(Unit, Name, Unit, 0, FieldSize, - FieldAlign, *Offset, 0, FieldTy); + llvm::DIType *Ty = DBuilder.createMemberType(Unit, Name, Unit, 0, FieldSize, + FieldAlign, *Offset, 0, FieldTy); *Offset += FieldSize; return Ty; } -void CGDebugInfo::collectFunctionDeclProps(GlobalDecl GD, - llvm::DIFile Unit, - StringRef &Name, StringRef &LinkageName, - llvm::DIDescriptor &FDContext, - llvm::DIArray &TParamsArray, +void CGDebugInfo::collectFunctionDeclProps(GlobalDecl GD, llvm::DIFile *Unit, + StringRef &Name, + StringRef &LinkageName, + llvm::DIScope *&FDContext, + llvm::DINodeArray &TParamsArray, unsigned &Flags) { const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl()); Name = getFunctionName(FD); // Use mangled name as linkage name for C/C++ functions. if (FD->hasPrototype()) { LinkageName = CGM.getMangledName(GD); - Flags |= llvm::DIDescriptor::FlagPrototyped; + Flags |= llvm::DINode::FlagPrototyped; } // No need to replicate the linkage name if it isn't different from the // subprogram name, no need to have it at all unless coverage is enabled or @@ -2330,10 +2319,10 @@ void CGDebugInfo::collectFunctionDeclProps(GlobalDecl GD, } } -void CGDebugInfo::collectVarDeclProps(const VarDecl *VD, llvm::DIFile &Unit, +void CGDebugInfo::collectVarDeclProps(const VarDecl *VD, llvm::DIFile *&Unit, unsigned &LineNo, QualType &T, StringRef &Name, StringRef &LinkageName, - llvm::DIDescriptor &VDContext) { + llvm::DIScope *&VDContext) { Unit = getOrCreateFile(VD->getLocation()); LineNo = getLineNumber(VD->getLocation()); @@ -2362,19 +2351,27 @@ void CGDebugInfo::collectVarDeclProps(const VarDecl *VD, llvm::DIFile &Unit, // FIXME: Generalize this for even non-member global variables where the // declaration and definition may have different lexical decl contexts, once // we have support for emitting declarations of (non-member) global variables. - VDContext = getContextDescriptor( - dyn_cast<Decl>(VD->isStaticDataMember() ? VD->getLexicalDeclContext() - : VD->getDeclContext())); -} - -llvm::DISubprogram + const DeclContext *DC = VD->isStaticDataMember() ? VD->getLexicalDeclContext() + : VD->getDeclContext(); + // When a record type contains an in-line initialization of a static data + // member, and the record type is marked as __declspec(dllexport), an implicit + // definition of the member will be created in the record context. DWARF + // doesn't seem to have a nice way to describe this in a form that consumers + // are likely to understand, so fake the "normal" situation of a definition + // outside the class by putting it in the global scope. 
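The dllexport comment above is easier to picture with the source pattern that triggers it; roughly the following (a hypothetical example, assuming a Windows/MSVC target), after which the code just below re-parents the implicit member definition to the translation unit:

    // Hypothetical trigger for the workaround described above:
    struct __declspec(dllexport) S {
      static const int X = 42;   // in-class initialization; dllexport forces
                                 // an implicit definition in the record context
    };

    int main() { return S::X - 42; }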
+ if (DC->isRecord()) + DC = CGM.getContext().getTranslationUnitDecl(); + VDContext = getContextDescriptor(dyn_cast<Decl>(DC)); +} + +llvm::DISubprogram * CGDebugInfo::getFunctionForwardDeclaration(const FunctionDecl *FD) { - llvm::DIArray TParamsArray; + llvm::DINodeArray TParamsArray; StringRef Name, LinkageName; unsigned Flags = 0; SourceLocation Loc = FD->getLocation(); - llvm::DIFile Unit = getOrCreateFile(Loc); - llvm::DIDescriptor DContext(Unit); + llvm::DIFile *Unit = getOrCreateFile(Loc); + llvm::DIScope *DContext = Unit; unsigned Line = getLineNumber(Loc); collectFunctionDeclProps(FD, Unit, Name, LinkageName, DContext, @@ -2386,35 +2383,31 @@ CGDebugInfo::getFunctionForwardDeclaration(const FunctionDecl *FD) { QualType FnType = CGM.getContext().getFunctionType(FD->getReturnType(), ArgTypes, FunctionProtoType::ExtProtoInfo()); - llvm::DISubprogram SP = - DBuilder.createTempFunctionFwdDecl(DContext, Name, LinkageName, Unit, Line, - getOrCreateFunctionType(FD, FnType, Unit), - !FD->isExternallyVisible(), - false /*declaration*/, 0, Flags, - CGM.getLangOpts().Optimize, nullptr, - TParamsArray, getFunctionDeclaration(FD)); + llvm::DISubprogram *SP = DBuilder.createTempFunctionFwdDecl( + DContext, Name, LinkageName, Unit, Line, + getOrCreateFunctionType(FD, FnType, Unit), !FD->isExternallyVisible(), + false /*declaration*/, 0, Flags, CGM.getLangOpts().Optimize, nullptr, + TParamsArray.get(), getFunctionDeclaration(FD)); const FunctionDecl *CanonDecl = cast<FunctionDecl>(FD->getCanonicalDecl()); - FwdDeclReplaceMap.emplace_back( - std::piecewise_construct, std::make_tuple(CanonDecl), - std::make_tuple(static_cast<llvm::Metadata *>(SP))); + FwdDeclReplaceMap.emplace_back(std::piecewise_construct, + std::make_tuple(CanonDecl), + std::make_tuple(SP)); return SP; } -llvm::DIGlobalVariable +llvm::DIGlobalVariable * CGDebugInfo::getGlobalVariableForwardDeclaration(const VarDecl *VD) { QualType T; StringRef Name, LinkageName; SourceLocation Loc = VD->getLocation(); - llvm::DIFile Unit = getOrCreateFile(Loc); - llvm::DIDescriptor DContext(Unit); + llvm::DIFile *Unit = getOrCreateFile(Loc); + llvm::DIScope *DContext = Unit; unsigned Line = getLineNumber(Loc); collectVarDeclProps(VD, Unit, Line, T, Name, LinkageName, DContext); - llvm::DIGlobalVariable GV = - DBuilder.createTempGlobalVariableFwdDecl(DContext, Name, LinkageName, Unit, - Line, getOrCreateType(T, Unit), - !VD->isExternallyVisible(), - nullptr, nullptr); + auto *GV = DBuilder.createTempGlobalVariableFwdDecl( + DContext, Name, LinkageName, Unit, Line, getOrCreateType(T, Unit), + !VD->isExternallyVisible(), nullptr, nullptr); FwdDeclReplaceMap.emplace_back( std::piecewise_construct, std::make_tuple(cast<VarDecl>(VD->getCanonicalDecl())), @@ -2422,7 +2415,7 @@ CGDebugInfo::getGlobalVariableForwardDeclaration(const VarDecl *VD) { return GV; } -llvm::DIDescriptor CGDebugInfo::getDeclarationOrDefinition(const Decl *D) { +llvm::DINode *CGDebugInfo::getDeclarationOrDefinition(const Decl *D) { // We only need a declaration (not a definition) of the type - so use whatever // we would otherwise do to get a type for a pointee. (forward declarations in // limited debug info, full definitions (if the type definition is available) @@ -2433,7 +2426,7 @@ llvm::DIDescriptor CGDebugInfo::getDeclarationOrDefinition(const Decl *D) { auto I = DeclCache.find(D->getCanonicalDecl()); if (I != DeclCache.end()) - return llvm::DIDescriptor(dyn_cast_or_null<llvm::MDNode>(I->second)); + return dyn_cast_or_null<llvm::DINode>(I->second); // No definition for now. 
Emit a forward definition that might be // merged with a potential upcoming definition. @@ -2442,59 +2435,55 @@ llvm::DIDescriptor CGDebugInfo::getDeclarationOrDefinition(const Decl *D) { else if (const auto *VD = dyn_cast<VarDecl>(D)) return getGlobalVariableForwardDeclaration(VD); - return llvm::DIDescriptor(); + return nullptr; } /// getFunctionDeclaration - Return debug info descriptor to describe method /// declaration for the given method definition. -llvm::DISubprogram CGDebugInfo::getFunctionDeclaration(const Decl *D) { +llvm::DISubprogram *CGDebugInfo::getFunctionDeclaration(const Decl *D) { if (!D || DebugKind <= CodeGenOptions::DebugLineTablesOnly) - return llvm::DISubprogram(); + return nullptr; const FunctionDecl *FD = dyn_cast<FunctionDecl>(D); if (!FD) - return llvm::DISubprogram(); + return nullptr; // Setup context. - llvm::DIScope S = getContextDescriptor(cast<Decl>(D->getDeclContext())); + auto *S = getContextDescriptor(cast<Decl>(D->getDeclContext())); auto MI = SPCache.find(FD->getCanonicalDecl()); if (MI == SPCache.end()) { if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD->getCanonicalDecl())) { - llvm::DICompositeType T(S); - llvm::DISubprogram SP = - CreateCXXMemberFunction(MD, getOrCreateFile(MD->getLocation()), T); - return SP; + return CreateCXXMemberFunction(MD, getOrCreateFile(MD->getLocation()), + cast<llvm::DICompositeType>(S)); } } if (MI != SPCache.end()) { - llvm::DISubprogram SP(dyn_cast_or_null<llvm::MDNode>(MI->second)); - if (SP.isSubprogram() && !SP.isDefinition()) + auto *SP = dyn_cast_or_null<llvm::DISubprogram>(MI->second); + if (SP && !SP->isDefinition()) return SP; } for (auto NextFD : FD->redecls()) { auto MI = SPCache.find(NextFD->getCanonicalDecl()); if (MI != SPCache.end()) { - llvm::DISubprogram SP(dyn_cast_or_null<llvm::MDNode>(MI->second)); - if (SP.isSubprogram() && !SP.isDefinition()) + auto *SP = dyn_cast_or_null<llvm::DISubprogram>(MI->second); + if (SP && !SP->isDefinition()) return SP; } } - return llvm::DISubprogram(); + return nullptr; } -// getOrCreateFunctionType - Construct DIType. If it is a c++ method, include +// getOrCreateFunctionType - Construct type. If it is a c++ method, include // implicit parameter "this". -llvm::DICompositeType CGDebugInfo::getOrCreateFunctionType(const Decl *D, - QualType FnType, - llvm::DIFile F) { +llvm::DISubroutineType *CGDebugInfo::getOrCreateFunctionType(const Decl *D, + QualType FnType, + llvm::DIFile *F) { if (!D || DebugKind <= CodeGenOptions::DebugLineTablesOnly) - // Create fake but valid subroutine type. Otherwise - // llvm::DISubprogram::Verify() would return false, and - // subprogram DIE will miss DW_AT_decl_file and - // DW_AT_decl_line fields. + // Create fake but valid subroutine type. Otherwise -verify would fail, and + // subprogram DIE will miss DW_AT_decl_file and DW_AT_decl_line fields. return DBuilder.createSubroutineType(F, DBuilder.getOrCreateTypeArray(None)); @@ -2515,11 +2504,10 @@ llvm::DICompositeType CGDebugInfo::getOrCreateFunctionType(const Decl *D, Elts.push_back(getOrCreateType(ResultTy, F)); // "self" pointer is always first argument. QualType SelfDeclTy = OMethod->getSelfDecl()->getType(); - llvm::DIType SelfTy = getOrCreateType(SelfDeclTy, F); - Elts.push_back(CreateSelfType(SelfDeclTy, SelfTy)); + Elts.push_back(CreateSelfType(SelfDeclTy, getOrCreateType(SelfDeclTy, F))); // "_cmd" pointer is always second argument. 
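Taken together, the Objective-C prototype that getOrCreateFunctionType assembles here is: return type in slot 0, then the artificial self and _cmd, then the declared parameters, with an unspecified parameter appended for variadic methods. A stand-in sketch of that ordering, using plain strings instead of DITypes:

    #include <string>
    #include <vector>

    std::vector<std::string> methodPrototype(std::string Ret,
                                             std::vector<std::string> Params,
                                             bool Variadic) {
      std::vector<std::string> Elts;
      Elts.push_back(Ret);                 // slot 0: return type
      Elts.push_back("self (artificial)"); // implicit receiver
      Elts.push_back("_cmd (artificial)"); // implicit selector
      for (auto &P : Params)
        Elts.push_back(P);
      if (Variadic)
        Elts.push_back("...");             // unspecified parameter
      return Elts;
    }

    int main() { return methodPrototype("void", {"int"}, true).size() == 5 ? 0 : 1; }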
- llvm::DIType CmdTy = getOrCreateType(OMethod->getCmdDecl()->getType(), F); - Elts.push_back(DBuilder.createArtificialType(CmdTy)); + Elts.push_back(DBuilder.createArtificialType( + getOrCreateType(OMethod->getCmdDecl()->getType(), F))); // Get rest of the arguments. for (const auto *PI : OMethod->params()) Elts.push_back(getOrCreateType(PI->getType(), F)); @@ -2527,7 +2515,7 @@ llvm::DICompositeType CGDebugInfo::getOrCreateFunctionType(const Decl *D, if (OMethod->isVariadic()) Elts.push_back(DBuilder.createUnspecifiedParameter()); - llvm::DITypeArray EltTypeArray = DBuilder.getOrCreateTypeArray(Elts); + llvm::DITypeRefArray EltTypeArray = DBuilder.getOrCreateTypeArray(Elts); return DBuilder.createSubroutineType(F, EltTypeArray); } @@ -2541,11 +2529,11 @@ llvm::DICompositeType CGDebugInfo::getOrCreateFunctionType(const Decl *D, for (unsigned i = 0, e = FPT->getNumParams(); i != e; ++i) EltTys.push_back(getOrCreateType(FPT->getParamType(i), F)); EltTys.push_back(DBuilder.createUnspecifiedParameter()); - llvm::DITypeArray EltTypeArray = DBuilder.getOrCreateTypeArray(EltTys); + llvm::DITypeRefArray EltTypeArray = DBuilder.getOrCreateTypeArray(EltTys); return DBuilder.createSubroutineType(F, EltTypeArray); } - return llvm::DICompositeType(getOrCreateType(FnType, F)); + return cast<llvm::DISubroutineType>(getOrCreateType(FnType, F)); } /// EmitFunctionStart - Constructs the debug code for entering a function. @@ -2562,20 +2550,19 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, SourceLocation Loc, bool HasDecl = (D != nullptr); unsigned Flags = 0; - llvm::DIFile Unit = getOrCreateFile(Loc); - llvm::DIDescriptor FDContext(Unit); - llvm::DIArray TParamsArray; + llvm::DIFile *Unit = getOrCreateFile(Loc); + llvm::DIScope *FDContext = Unit; + llvm::DINodeArray TParamsArray; if (!HasDecl) { // Use llvm function name. LinkageName = Fn->getName(); } else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { - // If there is a DISubprogram for this function available then use it. + // If there is a subprogram for this function available then use it. auto FI = SPCache.find(FD->getCanonicalDecl()); if (FI != SPCache.end()) { - llvm::DIDescriptor SP(dyn_cast_or_null<llvm::MDNode>(FI->second)); - if (SP.isSubprogram() && llvm::DISubprogram(SP).isDefinition()) { - llvm::MDNode *SPN = SP; - LexicalBlockStack.emplace_back(SPN); + auto *SP = dyn_cast_or_null<llvm::DISubprogram>(FI->second); + if (SP && SP->isDefinition()) { + LexicalBlockStack.emplace_back(SP); RegionMap[D].reset(SP); return; } @@ -2584,17 +2571,17 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, SourceLocation Loc, TParamsArray, Flags); } else if (const ObjCMethodDecl *OMD = dyn_cast<ObjCMethodDecl>(D)) { Name = getObjCMethodName(OMD); - Flags |= llvm::DIDescriptor::FlagPrototyped; + Flags |= llvm::DINode::FlagPrototyped; } else { // Use llvm function name. Name = Fn->getName(); - Flags |= llvm::DIDescriptor::FlagPrototyped; + Flags |= llvm::DINode::FlagPrototyped; } if (!Name.empty() && Name[0] == '\01') Name = Name.substr(1); if (!HasDecl || D->isImplicit()) { - Flags |= llvm::DIDescriptor::FlagArtificial; + Flags |= llvm::DINode::FlagArtificial; // Artificial functions without a location should not silently reuse CurLoc. if (Loc.isInvalid()) CurLoc = SourceLocation(); @@ -2607,11 +2594,11 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, SourceLocation Loc, // FunctionDecls. 
When/if we fix this we can have FDContext be TheCU/null for // all subprograms instead of the actual context since subprogram definitions // are emitted as CU level entities by the backend. - llvm::DISubprogram SP = DBuilder.createFunction( + llvm::DISubprogram *SP = DBuilder.createFunction( FDContext, Name, LinkageName, Unit, LineNo, getOrCreateFunctionType(D, FnType, Unit), Fn->hasInternalLinkage(), true /*definition*/, ScopeLine, Flags, CGM.getLangOpts().Optimize, Fn, - TParamsArray, getFunctionDeclaration(D)); + TParamsArray.get(), getFunctionDeclaration(D)); // We might get here with a VarDecl in the case we're generating // code for the initialization of globals. Do not record these decls // as they will overwrite the actual VarDecl Decl in the cache. @@ -2619,8 +2606,7 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, SourceLocation Loc, DeclCache[D->getCanonicalDecl()].reset(static_cast<llvm::Metadata *>(SP)); // Push the function onto the lexical block stack. - llvm::MDNode *SPN = SP; - LexicalBlockStack.emplace_back(SPN); + LexicalBlockStack.emplace_back(SP); if (HasDecl) RegionMap[D].reset(SP); @@ -2629,8 +2615,7 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, SourceLocation Loc, /// EmitLocation - Emit metadata to indicate a change in line/column /// information in the source file. If the location is invalid, the /// previous location will be reused. -void CGDebugInfo::EmitLocation(CGBuilderTy &Builder, SourceLocation Loc, - bool ForceColumnInfo) { +void CGDebugInfo::EmitLocation(CGBuilderTy &Builder, SourceLocation Loc) { // Update our current location setLocation(Loc); @@ -2639,7 +2624,7 @@ void CGDebugInfo::EmitLocation(CGBuilderTy &Builder, SourceLocation Loc, llvm::MDNode *Scope = LexicalBlockStack.back(); Builder.SetCurrentDebugLocation(llvm::DebugLoc::get( - getLineNumber(CurLoc), getColumnNumber(CurLoc, ForceColumnInfo), Scope)); + getLineNumber(CurLoc), getColumnNumber(CurLoc), Scope)); } /// CreateLexicalBlock - Creates a new lexical block node and pushes it on @@ -2648,11 +2633,9 @@ void CGDebugInfo::CreateLexicalBlock(SourceLocation Loc) { llvm::MDNode *Back = nullptr; if (!LexicalBlockStack.empty()) Back = LexicalBlockStack.back().get(); - llvm::DIDescriptor D = DBuilder.createLexicalBlock( - llvm::DIDescriptor(Back), getOrCreateFile(CurLoc), getLineNumber(CurLoc), - getColumnNumber(CurLoc)); - llvm::MDNode *DN = D; - LexicalBlockStack.emplace_back(DN); + LexicalBlockStack.emplace_back(DBuilder.createLexicalBlock( + cast<llvm::DIScope>(Back), getOrCreateFile(CurLoc), getLineNumber(CurLoc), + getColumnNumber(CurLoc))); } /// EmitLexicalBlockStart - Constructs the debug code for entering a declarative @@ -2705,15 +2688,15 @@ void CGDebugInfo::EmitFunctionEnd(CGBuilderTy &Builder) { // EmitTypeForVarWithBlocksAttr - Build up structure info for the byref. // See BuildByRefType. 
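The byref record that EmitTypeForVarWithBlocksAttr builds below follows the documented Clang Blocks ABI; roughly this shape (field set abbreviated, copy/dispose helpers omitted, real field names carry leading underscores). The DWARF expression assembled later reaches the variable through the forwarding pointer: DW_OP_plus <offsetof __forwarding>, DW_OP_deref, DW_OP_plus <offsetof x>.

    #include <cstddef>
    #include <cstdio>

    // Abbreviated __block byref record per the Clang Blocks ABI sketch.
    struct Byref_x {
      void *isa;
      Byref_x *forwarding;   // points at the live copy (stack or heap)
      int flags;
      int size;
      int x;                 // the captured variable itself
    };

    int main() {
      std::printf("forwarding at +%zu, x at +%zu\n",
                  offsetof(Byref_x, forwarding), offsetof(Byref_x, x));
    }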
-llvm::DIType CGDebugInfo::EmitTypeForVarWithBlocksAttr(const VarDecl *VD, - uint64_t *XOffset) { +llvm::DIType *CGDebugInfo::EmitTypeForVarWithBlocksAttr(const VarDecl *VD, + uint64_t *XOffset) { SmallVector<llvm::Metadata *, 5> EltTys; QualType FType; uint64_t FieldSize, FieldOffset; unsigned FieldAlign; - llvm::DIFile Unit = getOrCreateFile(VD->getLocation()); + llvm::DIFile *Unit = getOrCreateFile(VD->getLocation()); QualType Type = VD->getType(); FieldOffset = 0; @@ -2760,7 +2743,7 @@ llvm::DIType CGDebugInfo::EmitTypeForVarWithBlocksAttr(const VarDecl *VD, } FType = Type; - llvm::DIType FieldTy = getOrCreateType(FType, Unit); + llvm::DIType *FieldTy = getOrCreateType(FType, Unit); FieldSize = CGM.getContext().getTypeSize(FType); FieldAlign = CGM.getContext().toBits(Align); @@ -2770,16 +2753,16 @@ llvm::DIType CGDebugInfo::EmitTypeForVarWithBlocksAttr(const VarDecl *VD, EltTys.push_back(FieldTy); FieldOffset += FieldSize; - llvm::DIArray Elements = DBuilder.getOrCreateArray(EltTys); + llvm::DINodeArray Elements = DBuilder.getOrCreateArray(EltTys); - unsigned Flags = llvm::DIDescriptor::FlagBlockByrefStruct; + unsigned Flags = llvm::DINode::FlagBlockByrefStruct; return DBuilder.createStructType(Unit, "", Unit, 0, FieldOffset, 0, Flags, - llvm::DIType(), Elements); + nullptr, Elements); } /// EmitDeclare - Emit local variable declaration debug info. -void CGDebugInfo::EmitDeclare(const VarDecl *VD, llvm::dwarf::LLVMConstants Tag, +void CGDebugInfo::EmitDeclare(const VarDecl *VD, llvm::dwarf::Tag Tag, llvm::Value *Storage, unsigned ArgNo, CGBuilderTy &Builder) { assert(DebugKind >= CodeGenOptions::LimitedDebugInfo); @@ -2788,10 +2771,10 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, llvm::dwarf::LLVMConstants Tag, bool Unwritten = VD->isImplicit() || (isa<Decl>(VD->getDeclContext()) && cast<Decl>(VD->getDeclContext())->isImplicit()); - llvm::DIFile Unit; + llvm::DIFile *Unit = nullptr; if (!Unwritten) Unit = getOrCreateFile(VD->getLocation()); - llvm::DIType Ty; + llvm::DIType *Ty; uint64_t XOffset = 0; if (VD->hasAttr<BlocksAttr>()) Ty = EmitTypeForVarWithBlocksAttr(VD, &XOffset); @@ -2810,58 +2793,64 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, llvm::dwarf::LLVMConstants Tag, Line = getLineNumber(VD->getLocation()); Column = getColumnNumber(VD->getLocation()); } + SmallVector<int64_t, 9> Expr; unsigned Flags = 0; if (VD->isImplicit()) - Flags |= llvm::DIDescriptor::FlagArtificial; + Flags |= llvm::DINode::FlagArtificial; // If this is the first argument and it is implicit then // give it an object pointer flag. // FIXME: There has to be a better way to do this, but for static // functions there won't be an implicit param at arg1 and // otherwise it is 'self' or 'this'. 
if (isa<ImplicitParamDecl>(VD) && ArgNo == 1) - Flags |= llvm::DIDescriptor::FlagObjectPointer; + Flags |= llvm::DINode::FlagObjectPointer; if (llvm::Argument *Arg = dyn_cast<llvm::Argument>(Storage)) if (Arg->getType()->isPointerTy() && !Arg->hasByValAttr() && !VD->getType()->isPointerType()) - Flags |= llvm::DIDescriptor::FlagIndirectVariable; + Expr.push_back(llvm::dwarf::DW_OP_deref); - llvm::MDNode *Scope = LexicalBlockStack.back(); + auto *Scope = cast<llvm::DIScope>(LexicalBlockStack.back()); StringRef Name = VD->getName(); if (!Name.empty()) { if (VD->hasAttr<BlocksAttr>()) { CharUnits offset = CharUnits::fromQuantity(32); - SmallVector<int64_t, 9> addr; - addr.push_back(llvm::dwarf::DW_OP_plus); + Expr.push_back(llvm::dwarf::DW_OP_plus); // offset of __forwarding field offset = CGM.getContext().toCharUnitsFromBits( CGM.getTarget().getPointerWidth(0)); - addr.push_back(offset.getQuantity()); - addr.push_back(llvm::dwarf::DW_OP_deref); - addr.push_back(llvm::dwarf::DW_OP_plus); + Expr.push_back(offset.getQuantity()); + Expr.push_back(llvm::dwarf::DW_OP_deref); + Expr.push_back(llvm::dwarf::DW_OP_plus); // offset of x field offset = CGM.getContext().toCharUnitsFromBits(XOffset); - addr.push_back(offset.getQuantity()); + Expr.push_back(offset.getQuantity()); // Create the descriptor for the variable. - llvm::DIVariable D = DBuilder.createLocalVariable( - Tag, llvm::DIDescriptor(Scope), VD->getName(), Unit, Line, Ty, ArgNo); + auto *D = DBuilder.createLocalVariable(Tag, Scope, VD->getName(), Unit, + Line, Ty, ArgNo); // Insert an llvm.dbg.declare into the current block. - llvm::Instruction *Call = - DBuilder.insertDeclare(Storage, D, DBuilder.createExpression(addr), - Builder.GetInsertBlock()); - Call->setDebugLoc(llvm::DebugLoc::get(Line, Column, Scope)); + DBuilder.insertDeclare(Storage, D, DBuilder.createExpression(Expr), + llvm::DebugLoc::get(Line, Column, Scope), + Builder.GetInsertBlock()); return; } else if (isa<VariableArrayType>(VD->getType())) - Flags |= llvm::DIDescriptor::FlagIndirectVariable; + Expr.push_back(llvm::dwarf::DW_OP_deref); } else if (const RecordType *RT = dyn_cast<RecordType>(VD->getType())) { // If VD is an anonymous union then Storage represents value for // all union fields. const RecordDecl *RD = cast<RecordDecl>(RT->getDecl()); if (RD->isUnion() && RD->isAnonymousStructOrUnion()) { + // GDB has trouble finding local variables in anonymous unions, so we emit + // artificial local variables for each of the members. + // + // FIXME: Remove this code as soon as GDB supports this. + // The debug info verifier in LLVM operates based on the assumption that a + // variable has the same size as its storage, and we had to disable the check + // for artificial variables. for (const auto *Field : RD->fields()) { - llvm::DIType FieldTy = getOrCreateType(Field->getType(), Unit); + llvm::DIType *FieldTy = getOrCreateType(Field->getType(), Unit); StringRef FieldName = Field->getName(); // Ignore unnamed fields. Do not ignore unnamed records. @@ -2869,28 +2858,28 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, llvm::dwarf::LLVMConstants Tag, continue; // Use VarDecl's Tag, Scope and Line number.
- llvm::DIVariable D = DBuilder.createLocalVariable( - Tag, llvm::DIDescriptor(Scope), FieldName, Unit, Line, FieldTy, - CGM.getLangOpts().Optimize, Flags, ArgNo); + auto *D = DBuilder.createLocalVariable( + Tag, Scope, FieldName, Unit, Line, FieldTy, + CGM.getLangOpts().Optimize, Flags | llvm::DINode::FlagArtificial, + ArgNo); // Insert an llvm.dbg.declare into the current block. - llvm::Instruction *Call = DBuilder.insertDeclare( - Storage, D, DBuilder.createExpression(), Builder.GetInsertBlock()); - Call->setDebugLoc(llvm::DebugLoc::get(Line, Column, Scope)); + DBuilder.insertDeclare(Storage, D, DBuilder.createExpression(Expr), + llvm::DebugLoc::get(Line, Column, Scope), + Builder.GetInsertBlock()); } - return; } } // Create the descriptor for the variable. - llvm::DIVariable D = DBuilder.createLocalVariable( - Tag, llvm::DIDescriptor(Scope), Name, Unit, Line, Ty, - CGM.getLangOpts().Optimize, Flags, ArgNo); + auto *D = + DBuilder.createLocalVariable(Tag, Scope, Name, Unit, Line, Ty, + CGM.getLangOpts().Optimize, Flags, ArgNo); // Insert an llvm.dbg.declare into the current block. - llvm::Instruction *Call = DBuilder.insertDeclare( - Storage, D, DBuilder.createExpression(), Builder.GetInsertBlock()); - Call->setDebugLoc(llvm::DebugLoc::get(Line, Column, Scope)); + DBuilder.insertDeclare(Storage, D, DBuilder.createExpression(Expr), + llvm::DebugLoc::get(Line, Column, Scope), + Builder.GetInsertBlock()); } void CGDebugInfo::EmitDeclareOfAutoVariable(const VarDecl *VD, @@ -2906,9 +2895,9 @@ void CGDebugInfo::EmitDeclareOfAutoVariable(const VarDecl *VD, /// never happen though, since creating a type for the implicit self /// argument implies that we already parsed the interface definition /// and the ivar declarations in the implementation. -llvm::DIType CGDebugInfo::CreateSelfType(const QualType &QualTy, - llvm::DIType Ty) { - llvm::DIType CachedTy = getTypeOrNull(QualTy); +llvm::DIType *CGDebugInfo::CreateSelfType(const QualType &QualTy, + llvm::DIType *Ty) { + llvm::DIType *CachedTy = getTypeOrNull(QualTy); if (CachedTy) Ty = CachedTy; return DBuilder.createObjectPointerType(Ty); @@ -2926,8 +2915,8 @@ void CGDebugInfo::EmitDeclareOfBlockDeclRefVariable( bool isByRef = VD->hasAttr<BlocksAttr>(); uint64_t XOffset = 0; - llvm::DIFile Unit = getOrCreateFile(VD->getLocation()); - llvm::DIType Ty; + llvm::DIFile *Unit = getOrCreateFile(VD->getLocation()); + llvm::DIType *Ty; if (isByRef) Ty = EmitTypeForVarWithBlocksAttr(VD, &XOffset); else @@ -2968,19 +2957,19 @@ void CGDebugInfo::EmitDeclareOfBlockDeclRefVariable( } // Create the descriptor for the variable. - llvm::DIVariable D = - DBuilder.createLocalVariable(llvm::dwarf::DW_TAG_auto_variable, - llvm::DIDescriptor(LexicalBlockStack.back()), - VD->getName(), Unit, Line, Ty); + auto *D = DBuilder.createLocalVariable( + llvm::dwarf::DW_TAG_auto_variable, + cast<llvm::DILocalScope>(LexicalBlockStack.back()), VD->getName(), Unit, + Line, Ty); // Insert an llvm.dbg.declare into the current block. - llvm::Instruction *Call = InsertPoint ? 
- DBuilder.insertDeclare(Storage, D, DBuilder.createExpression(addr), - InsertPoint) - : DBuilder.insertDeclare(Storage, D, DBuilder.createExpression(addr), - Builder.GetInsertBlock()); - Call->setDebugLoc( - llvm::DebugLoc::get(Line, Column, LexicalBlockStack.back())); + auto DL = llvm::DebugLoc::get(Line, Column, LexicalBlockStack.back()); + if (InsertPoint) + DBuilder.insertDeclare(Storage, D, DBuilder.createExpression(addr), DL, + InsertPoint); + else + DBuilder.insertDeclare(Storage, D, DBuilder.createExpression(addr), DL, + Builder.GetInsertBlock()); } /// EmitDeclareOfArgVariable - Emit call to llvm.dbg.declare for an argument @@ -3013,7 +3002,7 @@ void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block, // Collect some general information about the block's location. SourceLocation loc = blockDecl->getCaretLocation(); - llvm::DIFile tunit = getOrCreateFile(loc); + llvm::DIFile *tunit = getOrCreateFile(loc); unsigned line = getLineNumber(loc); unsigned column = getColumnNumber(loc); @@ -3096,7 +3085,7 @@ void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block, const VarDecl *variable = capture->getVariable(); StringRef name = variable->getName(); - llvm::DIType fieldType; + llvm::DIType *fieldType; if (capture->isByRef()) { TypeInfo PtrInfo = C.getTypeInfo(C.VoidPtrTy); @@ -3118,70 +3107,67 @@ void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block, llvm::raw_svector_ostream(typeName) << "__block_literal_" << CGM.getUniqueBlockCount(); - llvm::DIArray fieldsArray = DBuilder.getOrCreateArray(fields); + llvm::DINodeArray fieldsArray = DBuilder.getOrCreateArray(fields); - llvm::DIType type = - DBuilder.createStructType(tunit, typeName.str(), tunit, line, - CGM.getContext().toBits(block.BlockSize), - CGM.getContext().toBits(block.BlockAlign), 0, - llvm::DIType(), fieldsArray); + llvm::DIType *type = DBuilder.createStructType( + tunit, typeName.str(), tunit, line, + CGM.getContext().toBits(block.BlockSize), + CGM.getContext().toBits(block.BlockAlign), 0, nullptr, fieldsArray); type = DBuilder.createPointerType(type, CGM.PointerWidthInBits); // Get overall information about the block. - unsigned flags = llvm::DIDescriptor::FlagArtificial; - llvm::MDNode *scope = LexicalBlockStack.back(); + unsigned flags = llvm::DINode::FlagArtificial; + auto *scope = cast<llvm::DILocalScope>(LexicalBlockStack.back()); // Create the descriptor for the parameter. - llvm::DIVariable debugVar = DBuilder.createLocalVariable( - llvm::dwarf::DW_TAG_arg_variable, llvm::DIDescriptor(scope), - Arg->getName(), tunit, line, type, CGM.getLangOpts().Optimize, flags, - ArgNo); + auto *debugVar = DBuilder.createLocalVariable( + llvm::dwarf::DW_TAG_arg_variable, scope, Arg->getName(), tunit, line, + type, CGM.getLangOpts().Optimize, flags, ArgNo); if (LocalAddr) { // Insert an llvm.dbg.value into the current block. - llvm::Instruction *DbgVal = DBuilder.insertDbgValueIntrinsic( + DBuilder.insertDbgValueIntrinsic( LocalAddr, 0, debugVar, DBuilder.createExpression(), - Builder.GetInsertBlock()); - DbgVal->setDebugLoc(llvm::DebugLoc::get(line, column, scope)); + llvm::DebugLoc::get(line, column, scope), Builder.GetInsertBlock()); } // Insert an llvm.dbg.declare into the current block. 
- llvm::Instruction *DbgDecl = DBuilder.insertDeclare( - Arg, debugVar, DBuilder.createExpression(), Builder.GetInsertBlock()); - DbgDecl->setDebugLoc(llvm::DebugLoc::get(line, column, scope)); + DBuilder.insertDeclare(Arg, debugVar, DBuilder.createExpression(), + llvm::DebugLoc::get(line, column, scope), + Builder.GetInsertBlock()); } /// If D is an out-of-class definition of a static data member of a class, find /// its corresponding in-class declaration. -llvm::DIDerivedType +llvm::DIDerivedType * CGDebugInfo::getOrCreateStaticDataMemberDeclarationOrNull(const VarDecl *D) { if (!D->isStaticDataMember()) - return llvm::DIDerivedType(); + return nullptr; + auto MI = StaticDataMemberCache.find(D->getCanonicalDecl()); if (MI != StaticDataMemberCache.end()) { assert(MI->second && "Static data member declaration should still exist"); - return llvm::DIDerivedType(cast<llvm::MDNode>(MI->second)); + return cast<llvm::DIDerivedType>(MI->second); } // If the member wasn't found in the cache, lazily construct and add it to the // type (used when a limited form of the type is emitted). auto DC = D->getDeclContext(); - llvm::DICompositeType Ctxt(getContextDescriptor(cast<Decl>(DC))); + auto *Ctxt = + cast<llvm::DICompositeType>(getContextDescriptor(cast<Decl>(DC))); return CreateRecordStaticField(D, Ctxt, cast<RecordDecl>(DC)); } /// Recursively collect all of the member fields of a global anonymous decl and /// create static variables for them. The first time this is called it needs /// to be on a union and then from there we can have additional unnamed fields. -llvm::DIGlobalVariable -CGDebugInfo::CollectAnonRecordDecls(const RecordDecl *RD, llvm::DIFile Unit, - unsigned LineNo, StringRef LinkageName, - llvm::GlobalVariable *Var, - llvm::DIDescriptor DContext) { - llvm::DIGlobalVariable GV; +llvm::DIGlobalVariable *CGDebugInfo::CollectAnonRecordDecls( + const RecordDecl *RD, llvm::DIFile *Unit, unsigned LineNo, + StringRef LinkageName, llvm::GlobalVariable *Var, llvm::DIScope *DContext) { + llvm::DIGlobalVariable *GV = nullptr; for (const auto *Field : RD->fields()) { - llvm::DIType FieldTy = getOrCreateType(Field->getType(), Unit); + llvm::DIType *FieldTy = getOrCreateType(Field->getType(), Unit); StringRef FieldName = Field->getName(); // Ignore unnamed fields, but recurse into anonymous records. @@ -3193,9 +3179,9 @@ CGDebugInfo::CollectAnonRecordDecls(const RecordDecl *RD, llvm::DIFile Unit, continue; } // Use VarDecl's Tag, Scope and Line number. - GV = DBuilder.createGlobalVariable( - DContext, FieldName, LinkageName, Unit, LineNo, FieldTy, - Var->hasInternalLinkage(), Var, llvm::DIDerivedType()); + GV = DBuilder.createGlobalVariable(DContext, FieldName, LinkageName, Unit, + LineNo, FieldTy, + Var->hasInternalLinkage(), Var, nullptr); } return GV; } @@ -3205,8 +3191,8 @@ void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var, const VarDecl *D) { assert(DebugKind >= CodeGenOptions::LimitedDebugInfo); // Create global variable debug descriptor. - llvm::DIFile Unit; - llvm::DIDescriptor DContext; + llvm::DIFile *Unit = nullptr; + llvm::DIScope *DContext = nullptr; unsigned LineNo; StringRef DeclName, LinkageName; QualType T; @@ -3214,7 +3200,7 @@ void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var, // Attempt to store one global variable for the declaration - even if we // emit a lot of fields. 
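(Aside: an assumed input, not taken from the commit, that reaches CollectAnonRecordDecls is a file-scope anonymous union. A single llvm::GlobalVariable backs all of its members, so the loop above emits one DIGlobalVariable per named field against that shared storage:

// Hypothetical C++ input: one storage location, one debug-info variable
// per named member.
static union {
  int asInt;     // DIGlobalVariable "asInt"
  float asFloat; // DIGlobalVariable "asFloat"
};

Unnamed fields are skipped, while unnamed nested records are recursed into, which is why the helper calls itself.)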
- llvm::DIGlobalVariable GV; + llvm::DIGlobalVariable *GV = nullptr; // If this is an anonymous union then we'll want to emit a global // variable for each member of the anonymous union so that it's possible @@ -3238,16 +3224,18 @@ void CGDebugInfo::EmitGlobalVariable(const ValueDecl *VD, llvm::Constant *Init) { assert(DebugKind >= CodeGenOptions::LimitedDebugInfo); // Create the descriptor for the variable. - llvm::DIFile Unit = getOrCreateFile(VD->getLocation()); + llvm::DIFile *Unit = getOrCreateFile(VD->getLocation()); StringRef Name = VD->getName(); - llvm::DIType Ty = getOrCreateType(VD->getType(), Unit); + llvm::DIType *Ty = getOrCreateType(VD->getType(), Unit); if (const EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(VD)) { const EnumDecl *ED = cast<EnumDecl>(ECD->getDeclContext()); assert(isa<EnumType>(ED->getTypeForDecl()) && "Enum without EnumType?"); Ty = getOrCreateType(QualType(ED->getTypeForDecl(), 0), Unit); } - // Do not use DIGlobalVariable for enums. - if (Ty.getTag() == llvm::dwarf::DW_TAG_enumeration_type) + // Do not use global variables for enums. + // + // FIXME: why not? + if (Ty->getTag() == llvm::dwarf::DW_TAG_enumeration_type) return; // Do not emit separate definitions for function local const/statics. if (isa<FunctionDecl>(VD->getDeclContext())) @@ -3263,7 +3251,7 @@ void CGDebugInfo::EmitGlobalVariable(const ValueDecl *VD, return; } - llvm::DIDescriptor DContext = + llvm::DIScope *DContext = getContextDescriptor(dyn_cast<Decl>(VD->getDeclContext())); auto &GV = DeclCache[VD]; @@ -3274,9 +3262,9 @@ void CGDebugInfo::EmitGlobalVariable(const ValueDecl *VD, true, Init, getOrCreateStaticDataMemberDeclarationOrNull(VarD))); } -llvm::DIScope CGDebugInfo::getCurrentContextDescriptor(const Decl *D) { +llvm::DIScope *CGDebugInfo::getCurrentContextDescriptor(const Decl *D) { if (!LexicalBlockStack.empty()) - return llvm::DIScope(LexicalBlockStack.back()); + return LexicalBlockStack.back(); return getContextDescriptor(D); } @@ -3297,21 +3285,21 @@ void CGDebugInfo::EmitUsingDecl(const UsingDecl &UD) { // Emitting one decl is sufficient - debuggers can detect that this is an // overloaded name & provide lookup for all the overloads. const UsingShadowDecl &USD = **UD.shadow_begin(); - if (llvm::DIDescriptor Target = + if (llvm::DINode *Target = getDeclarationOrDefinition(USD.getUnderlyingDecl())) DBuilder.createImportedDeclaration( getCurrentContextDescriptor(cast<Decl>(USD.getDeclContext())), Target, getLineNumber(USD.getLocation())); } -llvm::DIImportedEntity +llvm::DIImportedEntity * CGDebugInfo::EmitNamespaceAlias(const NamespaceAliasDecl &NA) { if (CGM.getCodeGenOpts().getDebugInfo() < CodeGenOptions::LimitedDebugInfo) - return llvm::DIImportedEntity(nullptr); + return nullptr; auto &VH = NamespaceAliasCache[&NA]; if (VH) - return llvm::DIImportedEntity(cast<llvm::MDNode>(VH)); - llvm::DIImportedEntity R(nullptr); + return cast<llvm::DIImportedEntity>(VH); + llvm::DIImportedEntity *R; if (const NamespaceAliasDecl *Underlying = dyn_cast<NamespaceAliasDecl>(NA.getAliasedNamespace())) // This could cache & dedup here rather than relying on metadata deduping. @@ -3330,19 +3318,19 @@ CGDebugInfo::EmitNamespaceAlias(const NamespaceAliasDecl &NA) { /// getOrCreateNamesSpace - Return namespace descriptor for the given /// namespace decl. 
-llvm::DINameSpace +llvm::DINamespace * CGDebugInfo::getOrCreateNameSpace(const NamespaceDecl *NSDecl) { NSDecl = NSDecl->getCanonicalDecl(); auto I = NameSpaceCache.find(NSDecl); if (I != NameSpaceCache.end()) - return llvm::DINameSpace(cast<llvm::MDNode>(I->second)); + return cast<llvm::DINamespace>(I->second); unsigned LineNo = getLineNumber(NSDecl->getLocation()); - llvm::DIFile FileD = getOrCreateFile(NSDecl->getLocation()); - llvm::DIDescriptor Context = - getContextDescriptor(dyn_cast<Decl>(NSDecl->getDeclContext())); - llvm::DINameSpace NS = - DBuilder.createNameSpace(Context, NSDecl->getName(), FileD, LineNo); + llvm::DIFile *FileD = getOrCreateFile(NSDecl->getLocation()); + llvm::DIScope *Context = + getContextDescriptor(dyn_cast<Decl>(NSDecl->getDeclContext())); + llvm::DINamespace *NS = + DBuilder.createNameSpace(Context, NSDecl->getName(), FileD, LineNo); NameSpaceCache[NSDecl].reset(NS); return NS; } @@ -3352,28 +3340,28 @@ void CGDebugInfo::finalize() { // element and the size(), so don't cache/reference them. for (size_t i = 0; i != ObjCInterfaceCache.size(); ++i) { ObjCInterfaceCacheEntry E = ObjCInterfaceCache[i]; - E.Decl.replaceAllUsesWith(CGM.getLLVMContext(), - E.Type->getDecl()->getDefinition() - ? CreateTypeDefinition(E.Type, E.Unit) - : E.Decl); + llvm::DIType *Ty = E.Type->getDecl()->getDefinition() + ? CreateTypeDefinition(E.Type, E.Unit) + : E.Decl; + DBuilder.replaceTemporary(llvm::TempDIType(E.Decl), Ty); } for (auto p : ReplaceMap) { assert(p.second); - llvm::DIType Ty(cast<llvm::MDNode>(p.second)); - assert(Ty.isForwardDecl()); + auto *Ty = cast<llvm::DIType>(p.second); + assert(Ty->isForwardDecl()); auto it = TypeCache.find(p.first); assert(it != TypeCache.end()); assert(it->second); - llvm::DIType RepTy(cast<llvm::MDNode>(it->second)); - Ty.replaceAllUsesWith(CGM.getLLVMContext(), RepTy); + DBuilder.replaceTemporary(llvm::TempDIType(Ty), + cast<llvm::DIType>(it->second)); } for (const auto &p : FwdDeclReplaceMap) { assert(p.second); - llvm::DIDescriptor FwdDecl(cast<llvm::MDNode>(p.second)); + llvm::TempMDNode FwdDecl(cast<llvm::MDNode>(p.second)); llvm::Metadata *Repl; auto it = DeclCache.find(p.first); @@ -3385,15 +3373,14 @@ void CGDebugInfo::finalize() { else Repl = it->second; - FwdDecl.replaceAllUsesWith(CGM.getLLVMContext(), - llvm::DIDescriptor(cast<llvm::MDNode>(Repl))); + DBuilder.replaceTemporary(std::move(FwdDecl), cast<llvm::MDNode>(Repl)); } // We keep our own list of retained types, because we need to look // up the final type in the type cache. for (std::vector<void *>::const_iterator RI = RetainedTypes.begin(), RE = RetainedTypes.end(); RI != RE; ++RI) - DBuilder.retainType(llvm::DIType(cast<llvm::MDNode>(TypeCache[*RI]))); + DBuilder.retainType(cast<llvm::DIType>(TypeCache[*RI])); DBuilder.finalize(); } @@ -3401,7 +3388,8 @@ void CGDebugInfo::finalize() { void CGDebugInfo::EmitExplicitCastType(QualType Ty) { if (CGM.getCodeGenOpts().getDebugInfo() < CodeGenOptions::LimitedDebugInfo) return; - llvm::DIType DieTy = getOrCreateType(Ty, getOrCreateMainFile()); - // Don't ignore in case of explicit cast where it is referenced indirectly. - DBuilder.retainType(DieTy); + + if (auto *DieTy = getOrCreateType(Ty, getOrCreateMainFile())) + // Don't ignore in case of explicit cast where it is referenced indirectly. 
+ DBuilder.retainType(DieTy); } diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.h b/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.h index 0be032c..8509e07 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.h +++ b/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.h @@ -43,47 +43,50 @@ namespace CodeGen { class CodeGenFunction; class CGBlockInfo; -/// CGDebugInfo - This class gathers all debug information during compilation +/// \brief This class gathers all debug information during compilation /// and is responsible for emitting to llvm globals or pass directly to /// the backend. class CGDebugInfo { - friend class ArtificialLocation; + friend class ApplyDebugLocation; friend class SaveAndRestoreLocation; CodeGenModule &CGM; const CodeGenOptions::DebugInfoKind DebugKind; llvm::DIBuilder DBuilder; - llvm::DICompileUnit TheCU; + llvm::DICompileUnit *TheCU = nullptr; SourceLocation CurLoc; - llvm::DIType VTablePtrType; - llvm::DIType ClassTy; - llvm::DICompositeType ObjTy; - llvm::DIType SelTy; - llvm::DIType OCLImage1dDITy, OCLImage1dArrayDITy, OCLImage1dBufferDITy; - llvm::DIType OCLImage2dDITy, OCLImage2dArrayDITy; - llvm::DIType OCLImage3dDITy; - llvm::DIType OCLEventDITy; - llvm::DIType BlockLiteralGeneric; - - /// TypeCache - Cache of previously constructed Types. + llvm::DIType *VTablePtrType = nullptr; + llvm::DIType *ClassTy = nullptr; + llvm::DICompositeType *ObjTy = nullptr; + llvm::DIType *SelTy = nullptr; + llvm::DIType *OCLImage1dDITy = nullptr; + llvm::DIType *OCLImage1dArrayDITy = nullptr; + llvm::DIType *OCLImage1dBufferDITy = nullptr; + llvm::DIType *OCLImage2dDITy = nullptr; + llvm::DIType *OCLImage2dArrayDITy = nullptr; + llvm::DIType *OCLImage3dDITy = nullptr; + llvm::DIType *OCLEventDITy = nullptr; + llvm::DIType *BlockLiteralGeneric = nullptr; + + /// \brief Cache of previously constructed Types. llvm::DenseMap<const void *, llvm::TrackingMDRef> TypeCache; struct ObjCInterfaceCacheEntry { const ObjCInterfaceType *Type; - llvm::DIType Decl; - llvm::DIFile Unit; - ObjCInterfaceCacheEntry(const ObjCInterfaceType *Type, llvm::DIType Decl, - llvm::DIFile Unit) + llvm::DIType *Decl; + llvm::DIFile *Unit; + ObjCInterfaceCacheEntry(const ObjCInterfaceType *Type, llvm::DIType *Decl, + llvm::DIFile *Unit) : Type(Type), Decl(Decl), Unit(Unit) {} }; - /// ObjCInterfaceCache - Cache of previously constructed interfaces + /// \brief Cache of previously constructed interfaces /// which may change. llvm::SmallVector<ObjCInterfaceCacheEntry, 32> ObjCInterfaceCache; - /// RetainedTypes - list of interfaces we want to keep even if orphaned. + /// \brief list of interfaces we want to keep even if orphaned. std::vector<void *> RetainedTypes; - /// ReplaceMap - Cache of forward declared types to RAUW at the end of + /// \brief Cache of forward declared types to RAUW at the end of /// compilation. std::vector<std::pair<const TagType *, llvm::TrackingMDRef>> ReplaceMap; @@ -93,14 +96,14 @@ class CGDebugInfo { FwdDeclReplaceMap; // LexicalBlockStack - Keep track of our current nested lexical block. - std::vector<llvm::TrackingMDNodeRef> LexicalBlockStack; + std::vector<llvm::TypedTrackingMDRef<llvm::DIScope>> LexicalBlockStack; llvm::DenseMap<const Decl *, llvm::TrackingMDRef> RegionMap; // FnBeginRegionCount - Keep track of LexicalBlockStack counter at the // beginning of a function. This is used to pop unbalanced regions at // the end of a function. 
std::vector<unsigned> FnBeginRegionCount; - /// DebugInfoNames - This is a storage for names that are + /// \brief This is a storage for names that are /// constructed on demand. For example, C++ destructors, C++ operators etc.. llvm::BumpPtrAllocator DebugInfoNames; StringRef CWDName; @@ -117,95 +120,94 @@ class CGDebugInfo { /// Helper functions for getOrCreateType. unsigned Checksum(const ObjCInterfaceDecl *InterfaceDecl); - llvm::DIType CreateType(const BuiltinType *Ty); - llvm::DIType CreateType(const ComplexType *Ty); - llvm::DIType CreateQualifiedType(QualType Ty, llvm::DIFile Fg); - llvm::DIType CreateType(const TypedefType *Ty, llvm::DIFile Fg); - llvm::DIType CreateType(const TemplateSpecializationType *Ty, llvm::DIFile Fg); - llvm::DIType CreateType(const ObjCObjectPointerType *Ty, - llvm::DIFile F); - llvm::DIType CreateType(const PointerType *Ty, llvm::DIFile F); - llvm::DIType CreateType(const BlockPointerType *Ty, llvm::DIFile F); - llvm::DIType CreateType(const FunctionType *Ty, llvm::DIFile F); - llvm::DIType CreateType(const RecordType *Tyg); - llvm::DIType CreateTypeDefinition(const RecordType *Ty); - llvm::DICompositeType CreateLimitedType(const RecordType *Ty); - void CollectContainingType(const CXXRecordDecl *RD, llvm::DICompositeType CT); - llvm::DIType CreateType(const ObjCInterfaceType *Ty, llvm::DIFile F); - llvm::DIType CreateTypeDefinition(const ObjCInterfaceType *Ty, llvm::DIFile F); - llvm::DIType CreateType(const ObjCObjectType *Ty, llvm::DIFile F); - llvm::DIType CreateType(const VectorType *Ty, llvm::DIFile F); - llvm::DIType CreateType(const ArrayType *Ty, llvm::DIFile F); - llvm::DIType CreateType(const LValueReferenceType *Ty, llvm::DIFile F); - llvm::DIType CreateType(const RValueReferenceType *Ty, llvm::DIFile Unit); - llvm::DIType CreateType(const MemberPointerType *Ty, llvm::DIFile F); - llvm::DIType CreateType(const AtomicType *Ty, llvm::DIFile F); - llvm::DIType CreateEnumType(const EnumType *Ty); - llvm::DIType CreateTypeDefinition(const EnumType *Ty); - llvm::DIType CreateSelfType(const QualType &QualTy, llvm::DIType Ty); - llvm::DIType getTypeOrNull(const QualType); - llvm::DICompositeType getOrCreateMethodType(const CXXMethodDecl *Method, - llvm::DIFile F); - llvm::DICompositeType getOrCreateInstanceMethodType( - QualType ThisPtr, const FunctionProtoType *Func, llvm::DIFile Unit); - llvm::DICompositeType getOrCreateFunctionType(const Decl *D, QualType FnType, - llvm::DIFile F); - llvm::DIType getOrCreateVTablePtrType(llvm::DIFile F); - llvm::DINameSpace getOrCreateNameSpace(const NamespaceDecl *N); - llvm::DIType getOrCreateTypeDeclaration(QualType PointeeTy, llvm::DIFile F); - llvm::DIType CreatePointerLikeType(llvm::dwarf::Tag Tag, - const Type *Ty, QualType PointeeTy, - llvm::DIFile F); + llvm::DIType *CreateType(const BuiltinType *Ty); + llvm::DIType *CreateType(const ComplexType *Ty); + llvm::DIType *CreateQualifiedType(QualType Ty, llvm::DIFile *Fg); + llvm::DIType *CreateType(const TypedefType *Ty, llvm::DIFile *Fg); + llvm::DIType *CreateType(const TemplateSpecializationType *Ty, + llvm::DIFile *Fg); + llvm::DIType *CreateType(const ObjCObjectPointerType *Ty, llvm::DIFile *F); + llvm::DIType *CreateType(const PointerType *Ty, llvm::DIFile *F); + llvm::DIType *CreateType(const BlockPointerType *Ty, llvm::DIFile *F); + llvm::DIType *CreateType(const FunctionType *Ty, llvm::DIFile *F); + llvm::DIType *CreateType(const RecordType *Tyg); + llvm::DIType *CreateTypeDefinition(const RecordType *Ty); + llvm::DICompositeType 
*CreateLimitedType(const RecordType *Ty); + void CollectContainingType(const CXXRecordDecl *RD, + llvm::DICompositeType *CT); + llvm::DIType *CreateType(const ObjCInterfaceType *Ty, llvm::DIFile *F); + llvm::DIType *CreateTypeDefinition(const ObjCInterfaceType *Ty, + llvm::DIFile *F); + llvm::DIType *CreateType(const ObjCObjectType *Ty, llvm::DIFile *F); + llvm::DIType *CreateType(const VectorType *Ty, llvm::DIFile *F); + llvm::DIType *CreateType(const ArrayType *Ty, llvm::DIFile *F); + llvm::DIType *CreateType(const LValueReferenceType *Ty, llvm::DIFile *F); + llvm::DIType *CreateType(const RValueReferenceType *Ty, llvm::DIFile *Unit); + llvm::DIType *CreateType(const MemberPointerType *Ty, llvm::DIFile *F); + llvm::DIType *CreateType(const AtomicType *Ty, llvm::DIFile *F); + llvm::DIType *CreateEnumType(const EnumType *Ty); + llvm::DIType *CreateTypeDefinition(const EnumType *Ty); + llvm::DIType *CreateSelfType(const QualType &QualTy, llvm::DIType *Ty); + llvm::DIType *getTypeOrNull(const QualType); + llvm::DISubroutineType *getOrCreateMethodType(const CXXMethodDecl *Method, + llvm::DIFile *F); + llvm::DISubroutineType * + getOrCreateInstanceMethodType(QualType ThisPtr, const FunctionProtoType *Func, + llvm::DIFile *Unit); + llvm::DISubroutineType * + getOrCreateFunctionType(const Decl *D, QualType FnType, llvm::DIFile *F); + llvm::DIType *getOrCreateVTablePtrType(llvm::DIFile *F); + llvm::DINamespace *getOrCreateNameSpace(const NamespaceDecl *N); + llvm::DIType *getOrCreateTypeDeclaration(QualType PointeeTy, llvm::DIFile *F); + llvm::DIType *CreatePointerLikeType(llvm::dwarf::Tag Tag, const Type *Ty, + QualType PointeeTy, llvm::DIFile *F); llvm::Value *getCachedInterfaceTypeOrNull(const QualType Ty); - llvm::DIType getOrCreateStructPtrType(StringRef Name, llvm::DIType &Cache); + llvm::DIType *getOrCreateStructPtrType(StringRef Name, llvm::DIType *&Cache); - llvm::DISubprogram CreateCXXMemberFunction(const CXXMethodDecl *Method, - llvm::DIFile F, - llvm::DIType RecordTy); + llvm::DISubprogram *CreateCXXMemberFunction(const CXXMethodDecl *Method, + llvm::DIFile *F, + llvm::DIType *RecordTy); - void CollectCXXMemberFunctions(const CXXRecordDecl *Decl, llvm::DIFile F, + void CollectCXXMemberFunctions(const CXXRecordDecl *Decl, llvm::DIFile *F, SmallVectorImpl<llvm::Metadata *> &E, - llvm::DIType T); + llvm::DIType *T); - void CollectCXXBases(const CXXRecordDecl *Decl, llvm::DIFile F, + void CollectCXXBases(const CXXRecordDecl *Decl, llvm::DIFile *F, SmallVectorImpl<llvm::Metadata *> &EltTys, - llvm::DIType RecordTy); - - llvm::DIArray - CollectTemplateParams(const TemplateParameterList *TPList, - ArrayRef<TemplateArgument> TAList, - llvm::DIFile Unit); - llvm::DIArray - CollectFunctionTemplateParams(const FunctionDecl *FD, llvm::DIFile Unit); - llvm::DIArray + llvm::DIType *RecordTy); + + llvm::DINodeArray CollectTemplateParams(const TemplateParameterList *TPList, + ArrayRef<TemplateArgument> TAList, + llvm::DIFile *Unit); + llvm::DINodeArray CollectFunctionTemplateParams(const FunctionDecl *FD, + llvm::DIFile *Unit); + llvm::DINodeArray CollectCXXTemplateParams(const ClassTemplateSpecializationDecl *TS, - llvm::DIFile F); + llvm::DIFile *F); - llvm::DIType createFieldType(StringRef name, QualType type, - uint64_t sizeInBitsOverride, SourceLocation loc, - AccessSpecifier AS, - uint64_t offsetInBits, - llvm::DIFile tunit, - llvm::DIScope scope, - const RecordDecl* RD = nullptr); + llvm::DIType *createFieldType(StringRef name, QualType type, + uint64_t sizeInBitsOverride, SourceLocation loc, 
+ AccessSpecifier AS, uint64_t offsetInBits, + llvm::DIFile *tunit, llvm::DIScope *scope, + const RecordDecl *RD = nullptr); // Helpers for collecting fields of a record. void CollectRecordLambdaFields(const CXXRecordDecl *CXXDecl, SmallVectorImpl<llvm::Metadata *> &E, - llvm::DIType RecordTy); - llvm::DIDerivedType CreateRecordStaticField(const VarDecl *Var, - llvm::DIType RecordTy, - const RecordDecl* RD); + llvm::DIType *RecordTy); + llvm::DIDerivedType *CreateRecordStaticField(const VarDecl *Var, + llvm::DIType *RecordTy, + const RecordDecl *RD); void CollectRecordNormalField(const FieldDecl *Field, uint64_t OffsetInBits, - llvm::DIFile F, + llvm::DIFile *F, SmallVectorImpl<llvm::Metadata *> &E, - llvm::DIType RecordTy, const RecordDecl *RD); - void CollectRecordFields(const RecordDecl *Decl, llvm::DIFile F, + llvm::DIType *RecordTy, const RecordDecl *RD); + void CollectRecordFields(const RecordDecl *Decl, llvm::DIFile *F, SmallVectorImpl<llvm::Metadata *> &E, - llvm::DICompositeType RecordTy); + llvm::DICompositeType *RecordTy); - void CollectVTableInfo(const CXXRecordDecl *Decl, llvm::DIFile F, + void CollectVTableInfo(const CXXRecordDecl *Decl, llvm::DIFile *F, SmallVectorImpl<llvm::Metadata *> &EltTys); // CreateLexicalBlock - Create a new lexical block node and push it on @@ -218,20 +220,15 @@ public: void finalize(); - /// setLocation - Update the current source location. If \arg loc is + /// \brief Update the current source location. If \arg loc is /// invalid it is ignored. void setLocation(SourceLocation Loc); - /// getLocation - Return the current source location. - SourceLocation getLocation() const { return CurLoc; } - - /// EmitLocation - Emit metadata to indicate a change in line/column + /// \brief Emit metadata to indicate a change in line/column /// information in the source file. - /// \param ForceColumnInfo Assume DebugColumnInfo option is true. - void EmitLocation(CGBuilderTy &Builder, SourceLocation Loc, - bool ForceColumnInfo = false); + void EmitLocation(CGBuilderTy &Builder, SourceLocation Loc); - /// EmitFunctionStart - Emit a call to llvm.dbg.function.start to indicate + /// \brief Emit a call to llvm.dbg.function.start to indicate /// start of a new function. /// \param Loc The location of the function header. /// \param ScopeLoc The location of the function body. @@ -240,23 +237,23 @@ public: QualType FnType, llvm::Function *Fn, CGBuilderTy &Builder); - /// EmitFunctionEnd - Constructs the debug code for exiting a function. + /// \brief Constructs the debug code for exiting a function. void EmitFunctionEnd(CGBuilderTy &Builder); - /// EmitLexicalBlockStart - Emit metadata to indicate the beginning of a + /// \brief Emit metadata to indicate the beginning of a /// new lexical block and push the block onto the stack. void EmitLexicalBlockStart(CGBuilderTy &Builder, SourceLocation Loc); - /// EmitLexicalBlockEnd - Emit metadata to indicate the end of a new lexical + /// \brief Emit metadata to indicate the end of a new lexical /// block and pop the current block. void EmitLexicalBlockEnd(CGBuilderTy &Builder, SourceLocation Loc); - /// EmitDeclareOfAutoVariable - Emit call to llvm.dbg.declare for an automatic + /// \brief Emit call to llvm.dbg.declare for an automatic /// variable declaration. void EmitDeclareOfAutoVariable(const VarDecl *Decl, llvm::Value *AI, CGBuilderTy &Builder); - /// EmitDeclareOfBlockDeclRefVariable - Emit call to llvm.dbg.declare for an + /// \brief Emit call to llvm.dbg.declare for an /// imported variable declaration in a block. 
void EmitDeclareOfBlockDeclRefVariable(const VarDecl *variable, llvm::Value *storage, @@ -264,12 +261,12 @@ public: const CGBlockInfo &blockInfo, llvm::Instruction *InsertPoint = 0); - /// EmitDeclareOfArgVariable - Emit call to llvm.dbg.declare for an argument + /// \brief Emit call to llvm.dbg.declare for an argument /// variable declaration. void EmitDeclareOfArgVariable(const VarDecl *Decl, llvm::Value *AI, unsigned ArgNo, CGBuilderTy &Builder); - /// EmitDeclareOfBlockLiteralArgVariable - Emit call to + /// \brief Emit call to /// llvm.dbg.declare for the block-literal argument to a block /// invocation function. void EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block, @@ -277,31 +274,30 @@ public: llvm::Value *LocalAddr, CGBuilderTy &Builder); - /// EmitGlobalVariable - Emit information about a global variable. + /// \brief Emit information about a global variable. void EmitGlobalVariable(llvm::GlobalVariable *GV, const VarDecl *Decl); - /// EmitGlobalVariable - Emit global variable's debug info. + /// \brief Emit global variable's debug info. void EmitGlobalVariable(const ValueDecl *VD, llvm::Constant *Init); - /// \brief - Emit C++ using directive. + /// \brief Emit C++ using directive. void EmitUsingDirective(const UsingDirectiveDecl &UD); - /// EmitExplicitCastType - Emit the type explicitly casted to. + /// \brief Emit the type explicitly casted to. void EmitExplicitCastType(QualType Ty); - /// \brief - Emit C++ using declaration. + /// \brief Emit C++ using declaration. void EmitUsingDecl(const UsingDecl &UD); - /// \brief - Emit C++ namespace alias. - llvm::DIImportedEntity EmitNamespaceAlias(const NamespaceAliasDecl &NA); + /// \brief Emit C++ namespace alias. + llvm::DIImportedEntity *EmitNamespaceAlias(const NamespaceAliasDecl &NA); - /// getOrCreateRecordType - Emit record type's standalone debug info. - llvm::DIType getOrCreateRecordType(QualType Ty, SourceLocation L); + /// \brief Emit record type's standalone debug info. + llvm::DIType *getOrCreateRecordType(QualType Ty, SourceLocation L); - /// getOrCreateInterfaceType - Emit an objective c interface type standalone + /// \brief Emit an objective c interface type standalone /// debug info. - llvm::DIType getOrCreateInterfaceType(QualType Ty, - SourceLocation Loc); + llvm::DIType *getOrCreateInterfaceType(QualType Ty, SourceLocation Loc); void completeType(const EnumDecl *ED); void completeType(const RecordDecl *RD); @@ -311,133 +307,129 @@ public: void completeTemplateDefinition(const ClassTemplateSpecializationDecl &SD); private: - /// EmitDeclare - Emit call to llvm.dbg.declare for a variable declaration. + /// \brief Emit call to llvm.dbg.declare for a variable declaration. /// Tag accepts custom types DW_TAG_arg_variable and DW_TAG_auto_variable, /// otherwise would be of type llvm::dwarf::Tag. - void EmitDeclare(const VarDecl *decl, llvm::dwarf::LLVMConstants Tag, - llvm::Value *AI, unsigned ArgNo, CGBuilderTy &Builder); + void EmitDeclare(const VarDecl *decl, llvm::dwarf::Tag Tag, llvm::Value *AI, + unsigned ArgNo, CGBuilderTy &Builder); // EmitTypeForVarWithBlocksAttr - Build up structure info for the byref. // See BuildByRefType. - llvm::DIType EmitTypeForVarWithBlocksAttr(const VarDecl *VD, - uint64_t *OffSet); + llvm::DIType *EmitTypeForVarWithBlocksAttr(const VarDecl *VD, + uint64_t *OffSet); - /// getContextDescriptor - Get context info for the decl. - llvm::DIScope getContextDescriptor(const Decl *Decl); + /// \brief Get context info for the decl. 
+ llvm::DIScope *getContextDescriptor(const Decl *Decl); - llvm::DIScope getCurrentContextDescriptor(const Decl *Decl); + llvm::DIScope *getCurrentContextDescriptor(const Decl *Decl); /// \brief Create a forward decl for a RecordType in a given context. - llvm::DICompositeType getOrCreateRecordFwdDecl(const RecordType *, - llvm::DIDescriptor); - - /// createContextChain - Create a set of decls for the context chain. - llvm::DIDescriptor createContextChain(const Decl *Decl); + llvm::DICompositeType *getOrCreateRecordFwdDecl(const RecordType *, + llvm::DIScope *); - /// getCurrentDirname - Return current directory name. + /// \brief Return current directory name. StringRef getCurrentDirname(); - /// CreateCompileUnit - Create new compile unit. + /// \brief Create new compile unit. void CreateCompileUnit(); - /// getOrCreateFile - Get the file debug info descriptor for the input + /// \brief Get the file debug info descriptor for the input /// location. - llvm::DIFile getOrCreateFile(SourceLocation Loc); + llvm::DIFile *getOrCreateFile(SourceLocation Loc); - /// getOrCreateMainFile - Get the file info for main compile unit. - llvm::DIFile getOrCreateMainFile(); + /// \brief Get the file info for main compile unit. + llvm::DIFile *getOrCreateMainFile(); - /// getOrCreateType - Get the type from the cache or create a new type if + /// \brief Get the type from the cache or create a new type if /// necessary. - llvm::DIType getOrCreateType(QualType Ty, llvm::DIFile Fg); + llvm::DIType *getOrCreateType(QualType Ty, llvm::DIFile *Fg); - /// getOrCreateLimitedType - Get the type from the cache or create a new + /// \brief Get the type from the cache or create a new /// partial type if necessary. - llvm::DIType getOrCreateLimitedType(const RecordType *Ty, llvm::DIFile F); + llvm::DIType *getOrCreateLimitedType(const RecordType *Ty, llvm::DIFile *F); - /// CreateTypeNode - Create type metadata for a source language type. - llvm::DIType CreateTypeNode(QualType Ty, llvm::DIFile Fg); + /// \brief Create type metadata for a source language type. + llvm::DIType *CreateTypeNode(QualType Ty, llvm::DIFile *Fg); - /// getObjCInterfaceDecl - return the underlying ObjCInterfaceDecl + /// \brief return the underlying ObjCInterfaceDecl /// if Ty is an ObjCInterface or a pointer to one. ObjCInterfaceDecl* getObjCInterfaceDecl(QualType Ty); - /// CreateMemberType - Create new member and increase Offset by FType's size. - llvm::DIType CreateMemberType(llvm::DIFile Unit, QualType FType, - StringRef Name, uint64_t *Offset); + /// \brief Create new member and increase Offset by FType's size. + llvm::DIType *CreateMemberType(llvm::DIFile *Unit, QualType FType, + StringRef Name, uint64_t *Offset); /// \brief Retrieve the DIDescriptor, if any, for the canonical form of this /// declaration. - llvm::DIDescriptor getDeclarationOrDefinition(const Decl *D); + llvm::DINode *getDeclarationOrDefinition(const Decl *D); - /// getFunctionDeclaration - Return debug info descriptor to describe method + /// \brief Return debug info descriptor to describe method /// declaration for the given method definition. - llvm::DISubprogram getFunctionDeclaration(const Decl *D); + llvm::DISubprogram *getFunctionDeclaration(const Decl *D); /// Return debug info descriptor to describe in-class static data member /// declaration for the given out-of-class definition. 
- llvm::DIDerivedType + llvm::DIDerivedType * getOrCreateStaticDataMemberDeclarationOrNull(const VarDecl *D); - /// \brief Create a DISubprogram describing the forward + /// \brief Create a subprogram describing the forward /// declaration represented in the given FunctionDecl. - llvm::DISubprogram getFunctionForwardDeclaration(const FunctionDecl *FD); + llvm::DISubprogram *getFunctionForwardDeclaration(const FunctionDecl *FD); - /// \brief Create a DIGlobalVariable describing the forward - /// declaration represented in the given VarDecl. - llvm::DIGlobalVariable getGlobalVariableForwardDeclaration(const VarDecl *VD); + /// \brief Create a global variable describing the forward declaration + /// represented in the given VarDecl. + llvm::DIGlobalVariable * + getGlobalVariableForwardDeclaration(const VarDecl *VD); /// Return a global variable that represents one of the collection of /// global variables created for an anonymous union. - llvm::DIGlobalVariable - CollectAnonRecordDecls(const RecordDecl *RD, llvm::DIFile Unit, unsigned LineNo, - StringRef LinkageName, llvm::GlobalVariable *Var, - llvm::DIDescriptor DContext); + llvm::DIGlobalVariable * + CollectAnonRecordDecls(const RecordDecl *RD, llvm::DIFile *Unit, + unsigned LineNo, StringRef LinkageName, + llvm::GlobalVariable *Var, llvm::DIScope *DContext); - /// getFunctionName - Get function name for the given FunctionDecl. If the + /// \brief Get function name for the given FunctionDecl. If the /// name is constructed on demand (e.g. C++ destructor) then the name /// is stored on the side. StringRef getFunctionName(const FunctionDecl *FD); - /// getObjCMethodName - Returns the unmangled name of an Objective-C method. + /// \brief Returns the unmangled name of an Objective-C method. /// This is the display name for the debugging info. StringRef getObjCMethodName(const ObjCMethodDecl *FD); - /// getSelectorName - Return selector name. This is used for debugging + /// \brief Return selector name. This is used for debugging /// info. StringRef getSelectorName(Selector S); - /// getClassName - Get class name including template argument list. + /// \brief Get class name including template argument list. StringRef getClassName(const RecordDecl *RD); - /// getVTableName - Get vtable name for the given Class. + /// \brief Get vtable name for the given Class. StringRef getVTableName(const CXXRecordDecl *Decl); - /// getLineNumber - Get line number for the location. If location is invalid + /// \brief Get line number for the location. If location is invalid /// then use current location. unsigned getLineNumber(SourceLocation Loc); - /// getColumnNumber - Get column number for the location. If location is + /// \brief Get column number for the location. If location is /// invalid then use current location. /// \param Force Assume DebugColumnInfo option is true. unsigned getColumnNumber(SourceLocation Loc, bool Force=false); /// \brief Collect various properties of a FunctionDecl. /// \param GD A GlobalDecl whose getDecl() must return a FunctionDecl. - void collectFunctionDeclProps(GlobalDecl GD, - llvm::DIFile Unit, + void collectFunctionDeclProps(GlobalDecl GD, llvm::DIFile *Unit, StringRef &Name, StringRef &LinkageName, - llvm::DIDescriptor &FDContext, - llvm::DIArray &TParamsArray, + llvm::DIScope *&FDContext, + llvm::DINodeArray &TParamsArray, unsigned &Flags); /// \brief Collect various properties of a VarDecl.
- void collectVarDeclProps(const VarDecl *VD, llvm::DIFile &Unit, - unsigned &LineNo, QualType &T, - StringRef &Name, StringRef &LinkageName, - llvm::DIDescriptor &VDContext); + void collectVarDeclProps(const VarDecl *VD, llvm::DIFile *&Unit, + unsigned &LineNo, QualType &T, StringRef &Name, + StringRef &LinkageName, llvm::DIScope *&VDContext); - /// internString - Allocate a copy of \p A using the DebugInfoNames allocator + /// \brief Allocate a copy of \p A using the DebugInfoNames allocator /// and return a reference to it. If multiple arguments are given the strings /// are concatenated. StringRef internString(StringRef A, StringRef B = StringRef()) { @@ -448,33 +440,60 @@ private: } }; +/// \brief A scoped helper to set the current debug location to the specified +/// location or preferred location of the specified Expr. class ApplyDebugLocation { -protected: +private: + void init(SourceLocation TemporaryLocation, bool DefaultToEmpty = false); + ApplyDebugLocation(CodeGenFunction &CGF, bool DefaultToEmpty, + SourceLocation TemporaryLocation); + llvm::DebugLoc OriginalLocation; CodeGenFunction &CGF; - public: - ApplyDebugLocation(CodeGenFunction &CGF, - SourceLocation TemporaryLocation = SourceLocation(), - bool ForceColumnInfo = false); + + /// \brief Set the location to the (valid) TemporaryLocation. + ApplyDebugLocation(CodeGenFunction &CGF, SourceLocation TemporaryLocation); + ApplyDebugLocation(CodeGenFunction &CGF, const Expr *E); ApplyDebugLocation(CodeGenFunction &CGF, llvm::DebugLoc Loc); + ~ApplyDebugLocation(); -}; -/// ArtificialLocation - An RAII object that temporarily switches to -/// an artificial debug location that has a valid scope, but no line -/// information. This is useful when emitting compiler-generated -/// helper functions that have no source location associated with -/// them. The DWARF specification allows the compiler to use the -/// special line number 0 to indicate code that can not be attributed -/// to any source location. -/// -/// This is necessary because passing an empty SourceLocation to -/// CGDebugInfo::setLocation() will result in the last valid location -/// being reused. -class ArtificialLocation : public ApplyDebugLocation { -public: - ArtificialLocation(CodeGenFunction &CGF); + /// \brief Apply TemporaryLocation if it is valid. Otherwise switch to an + /// artificial debug location that has a valid scope, but no line information. + /// + /// Artificial locations are useful when emitting compiler-generated helper + /// functions that have no source location associated with them. The DWARF + /// specification allows the compiler to use the special line number 0 to + /// indicate code that can not be attributed to any source location. Note that + /// passing an empty SourceLocation to CGDebugInfo::setLocation() will result + /// in the last valid location being reused. + static ApplyDebugLocation CreateArtificial(CodeGenFunction &CGF) { + return ApplyDebugLocation(CGF, false, SourceLocation()); + } + /// \brief Apply TemporaryLocation if it is valid. Otherwise switch to an + /// artificial debug location that has a valid scope, but no line information. + static ApplyDebugLocation CreateDefaultArtificial(CodeGenFunction &CGF, + SourceLocation TemporaryLocation) { + return ApplyDebugLocation(CGF, false, TemporaryLocation); + } + + /// \brief Set the IRBuilder to not attach debug locations. Note that passing + /// an empty SourceLocation to CGDebugInfo::setLocation() will result in the + /// last valid location being reused. 
Note that all instructions that do not + /// have a location at the beginning of a function are counted towards the + /// function prologue. + static ApplyDebugLocation CreateEmpty(CodeGenFunction &CGF) { + return ApplyDebugLocation(CGF, true, SourceLocation()); + } + + /// \brief Apply TemporaryLocation if it is valid. Otherwise set the IRBuilder + /// to not attach debug locations. + static ApplyDebugLocation CreateDefaultEmpty(CodeGenFunction &CGF, + SourceLocation TemporaryLocation) { + return ApplyDebugLocation(CGF, true, TemporaryLocation); + } + }; diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGDecl.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGDecl.cpp index 15a1a7f..579a041 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CGDecl.cpp +++ b/contrib/llvm/tools/clang/lib/CodeGen/CGDecl.cpp @@ -12,6 +12,7 @@ //===----------------------------------------------------------------------===// #include "CodeGenFunction.h" +#include "CGCleanup.h" #include "CGDebugInfo.h" #include "CGOpenCLRuntime.h" #include "CodeGenModule.h" @@ -34,6 +35,7 @@ using namespace CodeGen; void CodeGenFunction::EmitDecl(const Decl &D) { switch (D.getKind()) { case Decl::TranslationUnit: + case Decl::ExternCContext: case Decl::Namespace: case Decl::UnresolvedUsingTypename: case Decl::ClassTemplateSpecialization: @@ -154,6 +156,8 @@ static std::string getStaticDeclName(CodeGenModule &CGM, const VarDecl &D) { assert(!D.isExternallyVisible() && "name shouldn't matter"); std::string ContextName; const DeclContext *DC = D.getDeclContext(); + if (auto *CD = dyn_cast<CapturedDecl>(DC)) + DC = cast<DeclContext>(CD->getNonClosureContext()); if (const auto *FD = dyn_cast<FunctionDecl>(DC)) ContextName = CGM.getMangledName(FD); else if (const auto *BD = dyn_cast<BlockDecl>(DC)) @@ -206,6 +210,9 @@ llvm::Constant *CodeGenModule::getOrCreateStaticVarDecl( GV->setAlignment(getContext().getDeclAlign(&D).getQuantity()); setGlobalVisibility(GV, &D); + if (supportsCOMDAT() && GV->isWeakForLinker()) + GV->setComdat(TheModule.getOrInsertComdat(GV->getName())); + if (D.getTLSKind()) setTLSMode(GV, D); @@ -512,10 +519,7 @@ namespace { : Addr(addr), Size(size) {} void Emit(CodeGenFunction &CGF, Flags flags) override { - llvm::Value *castAddr = CGF.Builder.CreateBitCast(Addr, CGF.Int8PtrTy); - CGF.Builder.CreateCall2(CGF.CGM.getLLVMLifetimeEndFn(), - Size, castAddr) - ->setDoesNotThrow(); + CGF.EmitLifetimeEnd(Size, Addr); } }; } @@ -631,8 +635,9 @@ void CodeGenFunction::EmitScalarInit(const Expr *init, const ValueDecl *D, if (capturedByInit) { // We can use a simple GEP for this because it can't have been // moved yet. - tempLV.setAddress(Builder.CreateStructGEP(tempLV.getAddress(), - getByRefValueLLVMField(cast<VarDecl>(D)))); + tempLV.setAddress(Builder.CreateStructGEP( + nullptr, tempLV.getAddress(), + getByRefValueLLVMField(cast<VarDecl>(D)).second)); } llvm::PointerType *ty @@ -793,8 +798,9 @@ static void emitStoresForInitAfterMemset(llvm::Constant *Init, llvm::Value *Loc, // If necessary, get a pointer to the element and emit it. if (!Elt->isNullValue() && !isa<llvm::UndefValue>(Elt)) - emitStoresForInitAfterMemset(Elt, Builder.CreateConstGEP2_32(Loc, 0, i), - isVolatile, Builder); + emitStoresForInitAfterMemset( + Elt, Builder.CreateConstGEP2_32(Init->getType(), Loc, 0, i), + isVolatile, Builder); } return; } @@ -807,8 +813,9 @@ static void emitStoresForInitAfterMemset(llvm::Constant *Init, llvm::Value *Loc, // If necessary, get a pointer to the element and emit it.
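(Aside: a usage sketch for the factory-based ApplyDebugLocation API declared above, mirroring the call sites that appear later in this diff; the function name emitSyntheticCode is invented:

// Scope compiler-generated IR to an artificial location (line 0 in a
// valid scope) instead of letting it inherit the last user location.
void emitSyntheticCode(clang::CodeGen::CodeGenFunction &CGF) {
  auto AL = clang::CodeGen::ApplyDebugLocation::CreateArtificial(CGF);
  // ... emit instructions here; each is attributed to line 0 ...
} // ~ApplyDebugLocation() restores the original DebugLoc

CreateEmpty differs in that it attaches no location at all, which is what the prologue note above warns about.)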
if (!Elt->isNullValue() && !isa<llvm::UndefValue>(Elt)) - emitStoresForInitAfterMemset(Elt, Builder.CreateConstGEP2_32(Loc, 0, i), - isVolatile, Builder); + emitStoresForInitAfterMemset( + Elt, Builder.CreateConstGEP2_32(Init->getType(), Loc, 0, i), + isVolatile, Builder); } } @@ -833,21 +840,6 @@ static bool shouldUseMemSetPlusStoresToInitialize(llvm::Constant *Init, canEmitInitWithFewStoresAfterMemset(Init, StoreBudget); } -/// Should we use the LLVM lifetime intrinsics for the given local variable? -static bool shouldUseLifetimeMarkers(CodeGenFunction &CGF, const VarDecl &D, - unsigned Size) { - // For now, only in optimized builds. - if (CGF.CGM.getCodeGenOpts().OptimizationLevel == 0) - return false; - - // Limit the size of marked objects to 32 bytes. We don't want to increase - // compile time by marking tiny objects. - unsigned SizeThreshold = 32; - - return Size > SizeThreshold; -} - - /// EmitAutoVarDecl - Emit code and set up an entry in LocalDeclMap for a /// variable declaration with auto, register, or no storage class specifier. /// These turn into simple stack objects, or GlobalValues depending on target. @@ -857,6 +849,38 @@ void CodeGenFunction::EmitAutoVarDecl(const VarDecl &D) { EmitAutoVarCleanups(emission); } +/// Emit a lifetime.begin marker if some criteria are satisfied. +/// \return a pointer to the temporary size Value if a marker was emitted, null +/// otherwise +llvm::Value *CodeGenFunction::EmitLifetimeStart(uint64_t Size, + llvm::Value *Addr) { + // For now, only in optimized builds. + if (CGM.getCodeGenOpts().OptimizationLevel == 0) + return nullptr; + + // Disable lifetime markers in msan builds. + // FIXME: Remove this when msan works with lifetime markers. + if (getLangOpts().Sanitize.has(SanitizerKind::Memory)) + return nullptr; + + llvm::Value *SizeV = llvm::ConstantInt::get(Int64Ty, Size); + llvm::Value *Args[] = { + SizeV, + new llvm::BitCastInst(Addr, Int8PtrTy, "", Builder.GetInsertBlock())}; + llvm::CallInst *C = llvm::CallInst::Create(CGM.getLLVMLifetimeStartFn(), Args, + "", Builder.GetInsertBlock()); + C->setDoesNotThrow(); + return SizeV; +} + +void CodeGenFunction::EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr) { + llvm::Value *Args[] = {Size, new llvm::BitCastInst(Addr, Int8PtrTy, "", + Builder.GetInsertBlock())}; + llvm::CallInst *C = llvm::CallInst::Create(CGM.getLLVMLifetimeEndFn(), Args, + "", Builder.GetInsertBlock()); + C->setDoesNotThrow(); +} + /// EmitAutoVarAlloca - Emit the alloca and debug information for a /// local variable. Does not emit initialization or destruction. CodeGenFunction::AutoVarEmission @@ -952,13 +976,8 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) { // Emit a lifetime intrinsic if meaningful. There's no point // in doing this if we don't have a valid insertion point (?). 
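(Aside: the new EmitLifetimeStart / EmitLifetimeEnd helpers above centralize the marker emission that the hunk below rewires EmitAutoVarAlloca to use. A sketch of how a caller pairs them, assuming clang's internal API exactly as declared in this diff; emitWithLifetimeMarkers is an invented name:

// EmitLifetimeStart returns the size value when a marker was emitted and
// nullptr otherwise (at -O0, or when MemorySanitizer is enabled).
static void emitWithLifetimeMarkers(clang::CodeGen::CodeGenFunction &CGF,
                                    llvm::Value *Alloca, uint64_t Size) {
  if (llvm::Value *SizeV = CGF.EmitLifetimeStart(Size, Alloca)) {
    // ... the object is live in this region ...
    CGF.EmitLifetimeEnd(SizeV, Alloca);
  }
}

Returning the size value from the start helper lets the caller stash it, as SizeForLifetimeMarkers is below, and push the matching end-cleanup only when a marker was actually emitted.)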
uint64_t size = CGM.getDataLayout().getTypeAllocSize(LTy); - if (HaveInsertPoint() && shouldUseLifetimeMarkers(*this, D, size)) { - llvm::Value *sizeV = llvm::ConstantInt::get(Int64Ty, size); - - emission.SizeForLifetimeMarkers = sizeV; - llvm::Value *castAddr = Builder.CreateBitCast(Alloc, Int8PtrTy); - Builder.CreateCall2(CGM.getLLVMLifetimeStartFn(), sizeV, castAddr) - ->setDoesNotThrow(); + if (HaveInsertPoint()) { + emission.SizeForLifetimeMarkers = EmitLifetimeStart(size, Alloc); } else { assert(!emission.useLifetimeMarkers()); } @@ -971,7 +990,7 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) { llvm::Value *Stack = CreateTempAlloca(Int8PtrTy, "saved_stack"); llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stacksave); - llvm::Value *V = Builder.CreateCall(F); + llvm::Value *V = Builder.CreateCall(F, {}); Builder.CreateStore(V, Stack); @@ -1087,7 +1106,7 @@ void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) { if (emission.wasEmittedAsGlobal()) return; const VarDecl &D = *emission.Variable; - ApplyDebugLocation DL(*this, D.getLocation()); + auto DL = ApplyDebugLocation::CreateDefaultArtificial(*this, D.getLocation()); QualType type = D.getType(); // If this local has an initializer, emit it now. @@ -1304,6 +1323,8 @@ void CodeGenFunction::EmitAutoVarCleanups(const AutoVarEmission &emission) { EHStack.pushCleanup<CallLifetimeEnd>(NormalCleanup, emission.getAllocatedAddress(), emission.getSizeForLifetimeMarkers()); + EHCleanupScope &cleanup = cast<EHCleanupScope>(*EHStack.begin()); + cleanup.setLifetimeMarker(); } // Check the type for a cleanup. diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGDeclCXX.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGDeclCXX.cpp index 19e4bdd..06d157b 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CGDeclCXX.cpp +++ b/contrib/llvm/tools/clang/lib/CodeGen/CGDeclCXX.cpp @@ -139,9 +139,32 @@ void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D, const Expr *Init = D.getInit(); QualType T = D.getType(); + // The address space of a static local variable (DeclPtr) may be different + // from the address space of the "this" argument of the constructor. In that + // case, we need an addrspacecast before calling the constructor. + // + // struct StructWithCtor { + // __device__ StructWithCtor() {...} + // }; + // __device__ void foo() { + // __shared__ StructWithCtor s; + // ... + // } + // + // For example, in the above CUDA code, the static local variable s has a + // "shared" address space qualifier, but the constructor of StructWithCtor + // expects "this" in the "generic" address space. 
+ unsigned ExpectedAddrSpace = getContext().getTargetAddressSpace(T); + unsigned ActualAddrSpace = DeclPtr->getType()->getPointerAddressSpace(); + if (ActualAddrSpace != ExpectedAddrSpace) { + llvm::Type *LTy = CGM.getTypes().ConvertTypeForMem(T); + llvm::PointerType *PTy = llvm::PointerType::get(LTy, ExpectedAddrSpace); + DeclPtr = llvm::ConstantExpr::getAddrSpaceCast(DeclPtr, PTy); + } + if (!T->isReferenceType()) { if (getLangOpts().OpenMP && D.hasAttr<OMPThreadPrivateDeclAttr>()) - (void)CGM.getOpenMPRuntime().EmitOMPThreadPrivateVarDefinition( + (void)CGM.getOpenMPRuntime().emitThreadPrivateVarDefinition( &D, DeclPtr, D.getAttr<OMPThreadPrivateDeclAttr>()->getLocation(), PerformInit, this); if (PerformInit) @@ -236,6 +259,8 @@ llvm::Function *CodeGenModule::CreateGlobalInitOrDestructFunction( Fn->setSection(Section); } + SetLLVMFunctionAttributes(nullptr, getTypes().arrangeNullaryFunction(), Fn); + Fn->setCallingConv(getRuntimeCC()); if (!getLangOpts().Exceptions) @@ -267,15 +292,7 @@ void CodeGenModule::EmitPointerToInitFunc(const VarDecl *D, addUsedGlobal(PtrArray); // If the GV is already in a comdat group, then we have to join it. - llvm::Comdat *C = GV->getComdat(); - - // LinkOnce and Weak linkage are lowered down to a single-member comdat group. - // Make an explicit group so we can join it. - if (!C && (GV->hasWeakLinkage() || GV->hasLinkOnceLinkage())) { - C = TheModule.getOrInsertComdat(GV->getName()); - GV->setComdat(C); - } - if (C) + if (llvm::Comdat *C = GV->getComdat()) PtrArray->setComdat(C); } @@ -283,6 +300,11 @@ void CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D, llvm::GlobalVariable *Addr, bool PerformInit) { + // Check if we've already initialized this decl. + auto I = DelayedCXXInitPosition.find(D); + if (I != DelayedCXXInitPosition.end() && I->second == ~0U) + return; + llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false); SmallString<256> FnName; { @@ -312,11 +334,9 @@ CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D, CXXThreadLocalInitVars.push_back(Addr); } else if (PerformInit && ISA) { EmitPointerToInitFunc(D, Addr, Fn, ISA); - DelayedCXXInitPosition.erase(D); } else if (auto *IPA = D->getAttr<InitPriorityAttr>()) { OrderGlobalInits Key(IPA->getPriority(), PrioritizedCXXGlobalInits.size()); PrioritizedCXXGlobalInits.push_back(std::make_pair(Key, Fn)); - DelayedCXXInitPosition.erase(D); } else if (isTemplateInstantiation(D->getTemplateSpecializationKind())) { // C++ [basic.start.init]p2: // Definitions of explicitly specialized class template static data @@ -331,24 +351,24 @@ CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D, // minor startup time optimization. In the MS C++ ABI, there are no guard // variables, so this COMDAT key is required for correctness. AddGlobalCtor(Fn, 65535, COMDATKey); - DelayedCXXInitPosition.erase(D); } else if (D->hasAttr<SelectAnyAttr>()) { // SelectAny globals will be comdat-folded. Put the initializer into a // COMDAT group associated with the global, so the initializers get folded // too. AddGlobalCtor(Fn, 65535, COMDATKey); - DelayedCXXInitPosition.erase(D); } else { - llvm::DenseMap<const Decl *, unsigned>::iterator I = - DelayedCXXInitPosition.find(D); + I = DelayedCXXInitPosition.find(D); // Re-do lookup in case of re-hash. 
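(Aside: across this hunk, the eager DelayedCXXInitPosition.erase(D) calls are replaced with a ~0U sentinel meaning "initializer already emitted", so a repeated EmitCXXGlobalVarDeclInitFunc for the same decl becomes a no-op. A standalone sketch of the idiom with invented names, using std::map in place of llvm::DenseMap:

#include <map>

static std::map<const void *, unsigned> InitPosition;
static const unsigned AlreadyEmitted = ~0U;

static bool shouldEmitInit(const void *D) {
  auto I = InitPosition.find(D);
  if (I != InitPosition.end() && I->second == AlreadyEmitted)
    return false;                    // second call: skip re-emission
  InitPosition[D] = AlreadyEmitted;  // remember the decl was handled
  return true;
}

One sentinel write at the end of the function replaces the scattered erase() calls the hunk removes.)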
if (I == DelayedCXXInitPosition.end()) { CXXGlobalInits.push_back(Fn); - } else { - assert(CXXGlobalInits[I->second] == nullptr); + } else if (I->second != ~0U) { + assert(I->second < CXXGlobalInits.size() && + CXXGlobalInits[I->second] == nullptr); CXXGlobalInits[I->second] = Fn; - DelayedCXXInitPosition.erase(I); } } + + // Remember that we already emitted the initializer for this global. + DelayedCXXInitPosition[D] = ~0U; } void CodeGenModule::EmitCXXThreadLocalInitFunc() { @@ -411,7 +431,7 @@ CodeGenModule::EmitCXXGlobalInitFunc() { // priority emitted above. FileName = llvm::sys::path::filename(MainFile->getName()); } else { - FileName = SmallString<128>("<null>"); + FileName = "<null>"; } for (size_t i = 0; i < FileName.size(); ++i) { @@ -477,11 +497,11 @@ CodeGenFunction::GenerateCXXGlobalInitFunc(llvm::Function *Fn, ArrayRef<llvm::Function *> Decls, llvm::GlobalVariable *Guard) { { - ApplyDebugLocation NL(*this); + auto NL = ApplyDebugLocation::CreateEmpty(*this); StartFunction(GlobalDecl(), getContext().VoidTy, Fn, getTypes().arrangeNullaryFunction(), FunctionArgList()); // Emit an artificial location for this function. - ArtificialLocation AL(*this); + auto AL = ApplyDebugLocation::CreateArtificial(*this); llvm::BasicBlock *ExitBlock = nullptr; if (Guard) { @@ -528,11 +548,11 @@ void CodeGenFunction::GenerateCXXGlobalDtorsFunc(llvm::Function *Fn, const std::vector<std::pair<llvm::WeakVH, llvm::Constant*> > &DtorsAndObjects) { { - ApplyDebugLocation NL(*this); + auto NL = ApplyDebugLocation::CreateEmpty(*this); StartFunction(GlobalDecl(), getContext().VoidTy, Fn, getTypes().arrangeNullaryFunction(), FunctionArgList()); // Emit an artificial location for this function. - ArtificialLocation AL(*this); + auto AL = ApplyDebugLocation::CreateArtificial(*this); // Emit the dtors, in reverse order from construction. 
for (unsigned i = 0, e = DtorsAndObjects.size(); i != e; ++i) { diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGException.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGException.cpp index cb8eb8f..d9a3f0b 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CGException.cpp +++ b/contrib/llvm/tools/clang/lib/CodeGen/CGException.cpp @@ -16,23 +16,18 @@ #include "CGCleanup.h" #include "CGObjCRuntime.h" #include "TargetInfo.h" +#include "clang/AST/Mangle.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/StmtObjC.h" +#include "clang/AST/StmtVisitor.h" #include "llvm/IR/CallSite.h" #include "llvm/IR/Intrinsics.h" +#include "llvm/IR/IntrinsicInst.h" +#include "llvm/Support/SaveAndRestore.h" using namespace clang; using namespace CodeGen; -static llvm::Constant *getAllocateExceptionFn(CodeGenModule &CGM) { - // void *__cxa_allocate_exception(size_t thrown_size); - - llvm::FunctionType *FTy = - llvm::FunctionType::get(CGM.Int8PtrTy, CGM.SizeTy, /*IsVarArgs=*/false); - - return CGM.CreateRuntimeFunction(FTy, "__cxa_allocate_exception"); -} - static llvm::Constant *getFreeExceptionFn(CodeGenModule &CGM) { // void __cxa_free_exception(void *thrown_exception); @@ -42,44 +37,6 @@ static llvm::Constant *getFreeExceptionFn(CodeGenModule &CGM) { return CGM.CreateRuntimeFunction(FTy, "__cxa_free_exception"); } -static llvm::Constant *getThrowFn(CodeGenModule &CGM) { - // void __cxa_throw(void *thrown_exception, std::type_info *tinfo, - // void (*dest) (void *)); - - llvm::Type *Args[3] = { CGM.Int8PtrTy, CGM.Int8PtrTy, CGM.Int8PtrTy }; - llvm::FunctionType *FTy = - llvm::FunctionType::get(CGM.VoidTy, Args, /*IsVarArgs=*/false); - - return CGM.CreateRuntimeFunction(FTy, "__cxa_throw"); -} - -static llvm::Constant *getGetExceptionPtrFn(CodeGenModule &CGM) { - // void *__cxa_get_exception_ptr(void*); - - llvm::FunctionType *FTy = - llvm::FunctionType::get(CGM.Int8PtrTy, CGM.Int8PtrTy, /*IsVarArgs=*/false); - - return CGM.CreateRuntimeFunction(FTy, "__cxa_get_exception_ptr"); -} - -static llvm::Constant *getBeginCatchFn(CodeGenModule &CGM) { - // void *__cxa_begin_catch(void*); - - llvm::FunctionType *FTy = - llvm::FunctionType::get(CGM.Int8PtrTy, CGM.Int8PtrTy, /*IsVarArgs=*/false); - - return CGM.CreateRuntimeFunction(FTy, "__cxa_begin_catch"); -} - -static llvm::Constant *getEndCatchFn(CodeGenModule &CGM) { - // void __cxa_end_catch(); - - llvm::FunctionType *FTy = - llvm::FunctionType::get(CGM.VoidTy, /*IsVarArgs=*/false); - - return CGM.CreateRuntimeFunction(FTy, "__cxa_end_catch"); -} - static llvm::Constant *getUnexpectedFn(CodeGenModule &CGM) { // void __cxa_call_unexpected(void *thrown_exception); @@ -89,23 +46,30 @@ static llvm::Constant *getUnexpectedFn(CodeGenModule &CGM) { return CGM.CreateRuntimeFunction(FTy, "__cxa_call_unexpected"); } -static llvm::Constant *getTerminateFn(CodeGenModule &CGM) { +llvm::Constant *CodeGenModule::getTerminateFn() { // void __terminate(); llvm::FunctionType *FTy = - llvm::FunctionType::get(CGM.VoidTy, /*IsVarArgs=*/false); + llvm::FunctionType::get(VoidTy, /*IsVarArgs=*/false); StringRef name; // In C++, use std::terminate(). - if (CGM.getLangOpts().CPlusPlus) - name = "_ZSt9terminatev"; // FIXME: mangling! 
- else if (CGM.getLangOpts().ObjC1 && - CGM.getLangOpts().ObjCRuntime.hasTerminate()) + if (getLangOpts().CPlusPlus && + getTarget().getCXXABI().isItaniumFamily()) { + name = "_ZSt9terminatev"; + } else if (getLangOpts().CPlusPlus && + getTarget().getCXXABI().isMicrosoft()) { + if (getLangOpts().isCompatibleWithMSVC(LangOptions::MSVC2015)) + name = "__std_terminate"; + else + name = "\01?terminate@@YAXXZ"; + } else if (getLangOpts().ObjC1 && + getLangOpts().ObjCRuntime.hasTerminate()) name = "objc_terminate"; else name = "abort"; - return CGM.CreateRuntimeFunction(FTy, name); + return CreateRuntimeFunction(FTy, name); } static llvm::Constant *getCatchallRethrowFn(CodeGenModule &CGM, @@ -126,7 +90,12 @@ namespace { // This function must have prototype void(void*). const char *CatchallRethrowFn; - static const EHPersonality &get(CodeGenModule &CGM); + static const EHPersonality &get(CodeGenModule &CGM, + const FunctionDecl *FD); + static const EHPersonality &get(CodeGenFunction &CGF) { + return get(CGF.CGM, dyn_cast_or_null<FunctionDecl>(CGF.CurCodeDecl)); + } + static const EHPersonality GNU_C; static const EHPersonality GNU_C_SJLJ; static const EHPersonality GNU_C_SEH; @@ -137,6 +106,9 @@ namespace { static const EHPersonality GNU_CPlusPlus; static const EHPersonality GNU_CPlusPlus_SJLJ; static const EHPersonality GNU_CPlusPlus_SEH; + static const EHPersonality MSVC_except_handler; + static const EHPersonality MSVC_C_specific_handler; + static const EHPersonality MSVC_CxxFrameHandler3; }; } @@ -159,6 +131,12 @@ const EHPersonality EHPersonality::GNU_ObjCXX = { "__gnustep_objcxx_personality_v0", nullptr }; const EHPersonality EHPersonality::GNUstep_ObjC = { "__gnustep_objc_personality_v0", nullptr }; +const EHPersonality +EHPersonality::MSVC_except_handler = { "_except_handler3", nullptr }; +const EHPersonality +EHPersonality::MSVC_C_specific_handler = { "__C_specific_handler", nullptr }; +const EHPersonality +EHPersonality::MSVC_CxxFrameHandler3 = { "__CxxFrameHandler3", nullptr }; /// On Win64, use libgcc's SEH personality function. We fall back to dwarf on /// other platforms, unless the user asked for SjLj exceptions. @@ -231,9 +209,29 @@ static const EHPersonality &getObjCXXPersonality(const llvm::Triple &T, llvm_unreachable("bad runtime kind"); } -const EHPersonality &EHPersonality::get(CodeGenModule &CGM) { +static const EHPersonality &getSEHPersonalityMSVC(const llvm::Triple &T) { + if (T.getArch() == llvm::Triple::x86) + return EHPersonality::MSVC_except_handler; + return EHPersonality::MSVC_C_specific_handler; +} + +const EHPersonality &EHPersonality::get(CodeGenModule &CGM, + const FunctionDecl *FD) { const llvm::Triple &T = CGM.getTarget().getTriple(); const LangOptions &L = CGM.getLangOpts(); + + // Try to pick a personality function that is compatible with MSVC if we're + // not compiling Obj-C. Obj-C users better have an Obj-C runtime that supports + // the GCC-style personality function. 
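To make the personality selection below concrete, a sketch of which construct ends up with which personality on an MSVC triple (assumes a clang-cl style build with SEH support and /EHsc; function names are illustrative): a function containing __try selects _except_handler3 on x86 and __C_specific_handler elsewhere, while ordinary C++ EH selects __CxxFrameHandler3.

    #include <excpt.h> // EXCEPTION_EXECUTE_HANDLER

    int seh_user(int *p) {  // FD->usesSEHTry() is true, so this function
      __try {               // gets _except_handler3 (x86) or
        *p = 1;             // __C_specific_handler (other targets);
      } __except (EXCEPTION_EXECUTE_HANDLER) {
        return -1;          // a null 'p' faults and lands here
      }
      return 0;
    }

    int cxx_user() {        // plain C++ EH: __CxxFrameHandler3
      try {
        throw 42;
      } catch (int v) {
        return v;
      }
    }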
+ if (T.isWindowsMSVCEnvironment() && !L.ObjC1) { + if (L.SjLjExceptions) + return EHPersonality::GNU_CPlusPlus_SJLJ; + else if (FD && FD->usesSEHTry()) + return getSEHPersonalityMSVC(T); + else + return EHPersonality::MSVC_CxxFrameHandler3; + } + if (L.CPlusPlus && L.ObjC1) return getObjCXXPersonality(T, L); else if (L.CPlusPlus) @@ -318,7 +316,7 @@ void CodeGenModule::SimplifyPersonality() { if (!LangOpts.ObjCRuntime.isNeXTFamily()) return; - const EHPersonality &ObjCXX = EHPersonality::get(*this); + const EHPersonality &ObjCXX = EHPersonality::get(*this, /*FD=*/nullptr); const EHPersonality &CXX = getCXXPersonality(getTarget().getTriple(), LangOpts); if (&ObjCXX == &CXX) @@ -369,17 +367,16 @@ namespace { // differs from EmitAnyExprToMem only in that, if a final copy-ctor // call is required, an exception within that copy ctor causes // std::terminate to be invoked. -static void EmitAnyExprToExn(CodeGenFunction &CGF, const Expr *e, - llvm::Value *addr) { +void CodeGenFunction::EmitAnyExprToExn(const Expr *e, llvm::Value *addr) { // Make sure the exception object is cleaned up if there's an // exception during initialization. - CGF.pushFullExprCleanup<FreeException>(EHCleanup, addr); - EHScopeStack::stable_iterator cleanup = CGF.EHStack.stable_begin(); + pushFullExprCleanup<FreeException>(EHCleanup, addr); + EHScopeStack::stable_iterator cleanup = EHStack.stable_begin(); // __cxa_allocate_exception returns a void*; we need to cast this // to the appropriate type for the object. - llvm::Type *ty = CGF.ConvertTypeForMem(e->getType())->getPointerTo(); - llvm::Value *typedAddr = CGF.Builder.CreateBitCast(addr, ty); + llvm::Type *ty = ConvertTypeForMem(e->getType())->getPointerTo(); + llvm::Value *typedAddr = Builder.CreateBitCast(addr, ty); // FIXME: this isn't quite right! If there's a final unelided call // to a copy constructor, then according to [except.terminate]p1 we @@ -388,11 +385,11 @@ static void EmitAnyExprToExn(CodeGenFunction &CGF, const Expr *e, // evaluated but before the exception is caught. But the best way // to handle that is to teach EmitAggExpr to do the final copy // differently if it can't be elided. - CGF.EmitAnyExprToMem(e, typedAddr, e->getType().getQualifiers(), - /*IsInit*/ true); + EmitAnyExprToMem(e, typedAddr, e->getType().getQualifiers(), + /*IsInit*/ true); // Deactivate the cleanup block. - CGF.DeactivateCleanupBlock(cleanup, cast<llvm::Instruction>(typedAddr)); + DeactivateCleanupBlock(cleanup, cast<llvm::Instruction>(typedAddr)); } llvm::Value *CodeGenFunction::getExceptionSlot() { @@ -417,67 +414,18 @@ llvm::Value *CodeGenFunction::getSelectorFromSlot() { void CodeGenFunction::EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint) { - if (!E->getSubExpr()) { - CGM.getCXXABI().emitRethrow(*this, /*isNoReturn*/true); - - // throw is an expression, and the expression emitters expect us - // to leave ourselves at a valid insertion point. - if (KeepInsertionPoint) - EmitBlock(createBasicBlock("throw.cont")); - - return; - } - - if (CGM.getTarget().getTriple().isKnownWindowsMSVCEnvironment()) { - ErrorUnsupported(E, "throw expression"); - return; - } - - QualType ThrowType = E->getSubExpr()->getType(); - - if (ThrowType->isObjCObjectPointerType()) { - const Stmt *ThrowStmt = E->getSubExpr(); - const ObjCAtThrowStmt S(E->getExprLoc(), - const_cast<Stmt *>(ThrowStmt)); - CGM.getObjCRuntime().EmitThrowStmt(*this, S, false); - // This will clear insertion point which was not cleared in - // call to EmitThrowStmt. 
- if (KeepInsertionPoint) - EmitBlock(createBasicBlock("throw.cont")); - return; - } - - // Now allocate the exception object. - llvm::Type *SizeTy = ConvertType(getContext().getSizeType()); - uint64_t TypeSize = getContext().getTypeSizeInChars(ThrowType).getQuantity(); - - llvm::Constant *AllocExceptionFn = getAllocateExceptionFn(CGM); - llvm::CallInst *ExceptionPtr = - EmitNounwindRuntimeCall(AllocExceptionFn, - llvm::ConstantInt::get(SizeTy, TypeSize), - "exception"); - - EmitAnyExprToExn(*this, E->getSubExpr(), ExceptionPtr); - - // Now throw the exception. - llvm::Constant *TypeInfo = CGM.GetAddrOfRTTIDescriptor(ThrowType, - /*ForEH=*/true); - - // The address of the destructor. If the exception type has a - // trivial destructor (or isn't a record), we just pass null. - llvm::Constant *Dtor = nullptr; - if (const RecordType *RecordTy = ThrowType->getAs<RecordType>()) { - CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordTy->getDecl()); - if (!Record->hasTrivialDestructor()) { - CXXDestructorDecl *DtorD = Record->getDestructor(); - Dtor = CGM.getAddrOfCXXStructor(DtorD, StructorType::Complete); - Dtor = llvm::ConstantExpr::getBitCast(Dtor, Int8PtrTy); + if (const Expr *SubExpr = E->getSubExpr()) { + QualType ThrowType = SubExpr->getType(); + if (ThrowType->isObjCObjectPointerType()) { + const Stmt *ThrowStmt = E->getSubExpr(); + const ObjCAtThrowStmt S(E->getExprLoc(), const_cast<Stmt *>(ThrowStmt)); + CGM.getObjCRuntime().EmitThrowStmt(*this, S, false); + } else { + CGM.getCXXABI().emitThrow(*this, E); } + } else { + CGM.getCXXABI().emitRethrow(*this, /*isNoReturn=*/true); } - if (!Dtor) Dtor = llvm::Constant::getNullValue(Int8PtrTy); - - llvm::Value *args[] = { ExceptionPtr, TypeInfo, Dtor }; - EmitNoreturnRuntimeCallOrInvoke(getThrowFn(CGM), args); // throw is an expression, and the expression emitters expect us // to leave ourselves at a valid insertion point. @@ -509,6 +457,10 @@ void CodeGenFunction::EmitStartEHSpec(const Decl *D) { EHStack.pushTerminate(); } } else if (EST == EST_Dynamic || EST == EST_DynamicNone) { + // TODO: Revisit exception specifications for the MS ABI. There is a way to + // encode these in an object file but MSVC doesn't do anything with it. + if (getTarget().getCXXABI().isMicrosoft()) + return; unsigned NumExceptions = Proto->getNumExceptions(); EHFilterScope *Filter = EHStack.pushFilter(NumExceptions); @@ -543,8 +495,9 @@ static void emitFilterDispatchBlock(CodeGenFunction &CGF, llvm::Value *zero = CGF.Builder.getInt32(0); llvm::Value *failsFilter = - CGF.Builder.CreateICmpSLT(selector, zero, "ehspec.fails"); - CGF.Builder.CreateCondBr(failsFilter, unexpectedBB, CGF.getEHResumeBlock(false)); + CGF.Builder.CreateICmpSLT(selector, zero, "ehspec.fails"); + CGF.Builder.CreateCondBr(failsFilter, unexpectedBB, + CGF.getEHResumeBlock(false)); CGF.EmitBlock(unexpectedBB); } @@ -582,6 +535,10 @@ void CodeGenFunction::EmitEndEHSpec(const Decl *D) { EHStack.popTerminate(); } } else if (EST == EST_Dynamic || EST == EST_DynamicNone) { + // TODO: Revisit exception specifications for the MS ABI. There is a way to + // encode these in an object file but MSVC doesn't do anything with it. 
+ if (getTarget().getCXXABI().isMicrosoft()) + return; EHFilterScope &filterScope = cast<EHFilterScope>(*EHStack.begin()); emitFilterDispatchBlock(*this, filterScope); EHStack.popFilter(); @@ -589,11 +546,6 @@ void CodeGenFunction::EmitEndEHSpec(const Decl *D) { } void CodeGenFunction::EmitCXXTryStmt(const CXXTryStmt &S) { - if (CGM.getTarget().getTriple().isKnownWindowsMSVCEnvironment()) { - ErrorUnsupported(&S, "try statement"); - return; - } - EnterCXXTryStmt(S); EmitStmt(S.getTryBlock()); ExitCXXTryStmt(S); @@ -622,7 +574,8 @@ void CodeGenFunction::EnterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) { if (CaughtType->isObjCObjectPointerType()) TypeInfo = CGM.getObjCRuntime().GetEHType(CaughtType); else - TypeInfo = CGM.GetAddrOfRTTIDescriptor(CaughtType, /*ForEH=*/true); + TypeInfo = + CGM.getAddrOfCXXCatchHandlerType(CaughtType, C->getCaughtType()); CatchScope->setHandler(I, TypeInfo, Handler); } else { // No exception decl indicates '...', a catch-all. @@ -695,8 +648,15 @@ llvm::BasicBlock *CodeGenFunction::getInvokeDestImpl() { assert(EHStack.requiresLandingPad()); assert(!EHStack.empty()); - if (!CGM.getLangOpts().Exceptions) - return nullptr; + // If exceptions are disabled, there are usually no landingpads. However, when + // SEH is enabled, functions using SEH still get landingpads. + const LangOptions &LO = CGM.getLangOpts(); + if (!LO.Exceptions) { + if (!LO.Borland && !LO.MicrosoftExt) + return nullptr; + if (!currentFunctionUsesSEHTry()) + return nullptr; + } // Check the innermost scope for a cached landing pad. If this is // a non-EH cleanup, we'll check enclosing scopes in EmitLandingPad. @@ -734,9 +694,9 @@ llvm::BasicBlock *CodeGenFunction::EmitLandingPad() { // Save the current IR generation state. CGBuilderTy::InsertPoint savedIP = Builder.saveAndClearIP(); - ApplyDebugLocation AutoRestoreLocation(*this, CurEHLocation); + auto DL = ApplyDebugLocation::CreateDefaultArtificial(*this, CurEHLocation); - const EHPersonality &personality = EHPersonality::get(CGM); + const EHPersonality &personality = EHPersonality::get(*this); // Create and configure the landing pad. llvm::BasicBlock *lpad = createBasicBlock("lpad"); @@ -762,8 +722,8 @@ llvm::BasicBlock *CodeGenFunction::EmitLandingPad() { bool hasFilter = false; SmallVector<llvm::Value*, 4> filterTypes; llvm::SmallPtrSet<llvm::Value*, 4> catchTypes; - for (EHScopeStack::iterator I = EHStack.begin(), E = EHStack.end(); - I != E; ++I) { + for (EHScopeStack::iterator I = EHStack.begin(), E = EHStack.end(); I != E; + ++I) { switch (I->getKind()) { case EHScope::Cleanup: @@ -857,263 +817,6 @@ llvm::BasicBlock *CodeGenFunction::EmitLandingPad() { return lpad; } -namespace { - /// A cleanup to call __cxa_end_catch. In many cases, the caught - /// exception type lets us state definitively that the thrown exception - /// type does not have a destructor. In particular: - /// - Catch-alls tell us nothing, so we have to conservatively - /// assume that the thrown exception might have a destructor. - /// - Catches by reference behave according to their base types. - /// - Catches of non-record types will only trigger for exceptions - /// of non-record types, which never have destructors. - /// - Catches of record types can trigger for arbitrary subclasses - /// of the caught type, so we have to assume the actual thrown - /// exception type might have a throwing destructor, even if the - /// caught type's destructor is trivial or nothrow. 
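The CallEndCatch cleanup just below (removed here as this logic moves behind the C++ ABI's emitBeginCatch hook, used later in ExitCXXTryStmt) encodes that conservative reasoning; for illustration, a self-contained sketch (hypothetical types, not from the commit) of the catch shapes the comment distinguishes:

    struct Base { virtual ~Base() {} };
    struct Derived : Base {}; // a subclass whose dtor could, in general, throw

    void g() { throw Derived(); }

    void handler_shapes() {
      try {
        g();
      } catch (int) {      // non-record type: the exception has no destructor
      } catch (Base &b) {  // record type: may really be a Derived, so the
        (void)b;           // thrown object might have a throwing destructor
      } catch (Base *&p) { // reference to pointer: __cxa_begin_catch returns
        p = 0;             // the adjusted pointer by value, so it is spilled
                           // to a temporary and 'p' binds to that temporary
      } catch (...) {      // catch-all: nothing is known, assume the worst
      }
    }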
- struct CallEndCatch : EHScopeStack::Cleanup { - CallEndCatch(bool MightThrow) : MightThrow(MightThrow) {} - bool MightThrow; - - void Emit(CodeGenFunction &CGF, Flags flags) override { - if (!MightThrow) { - CGF.EmitNounwindRuntimeCall(getEndCatchFn(CGF.CGM)); - return; - } - - CGF.EmitRuntimeCallOrInvoke(getEndCatchFn(CGF.CGM)); - } - }; -} - -/// Emits a call to __cxa_begin_catch and enters a cleanup to call -/// __cxa_end_catch. -/// -/// \param EndMightThrow - true if __cxa_end_catch might throw -static llvm::Value *CallBeginCatch(CodeGenFunction &CGF, - llvm::Value *Exn, - bool EndMightThrow) { - llvm::CallInst *call = - CGF.EmitNounwindRuntimeCall(getBeginCatchFn(CGF.CGM), Exn); - - CGF.EHStack.pushCleanup<CallEndCatch>(NormalAndEHCleanup, EndMightThrow); - - return call; -} - -/// A "special initializer" callback for initializing a catch -/// parameter during catch initialization. -static void InitCatchParam(CodeGenFunction &CGF, - const VarDecl &CatchParam, - llvm::Value *ParamAddr, - SourceLocation Loc) { - // Load the exception from where the landing pad saved it. - llvm::Value *Exn = CGF.getExceptionFromSlot(); - - CanQualType CatchType = - CGF.CGM.getContext().getCanonicalType(CatchParam.getType()); - llvm::Type *LLVMCatchTy = CGF.ConvertTypeForMem(CatchType); - - // If we're catching by reference, we can just cast the object - // pointer to the appropriate pointer. - if (isa<ReferenceType>(CatchType)) { - QualType CaughtType = cast<ReferenceType>(CatchType)->getPointeeType(); - bool EndCatchMightThrow = CaughtType->isRecordType(); - - // __cxa_begin_catch returns the adjusted object pointer. - llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, EndCatchMightThrow); - - // We have no way to tell the personality function that we're - // catching by reference, so if we're catching a pointer, - // __cxa_begin_catch will actually return that pointer by value. - if (const PointerType *PT = dyn_cast<PointerType>(CaughtType)) { - QualType PointeeType = PT->getPointeeType(); - - // When catching by reference, generally we should just ignore - // this by-value pointer and use the exception object instead. - if (!PointeeType->isRecordType()) { - - // Exn points to the struct _Unwind_Exception header, which - // we have to skip past in order to reach the exception data. - unsigned HeaderSize = - CGF.CGM.getTargetCodeGenInfo().getSizeOfUnwindException(); - AdjustedExn = CGF.Builder.CreateConstGEP1_32(Exn, HeaderSize); - - // However, if we're catching a pointer-to-record type that won't - // work, because the personality function might have adjusted - // the pointer. There's actually no way for us to fully satisfy - // the language/ABI contract here: we can't use Exn because it - // might have the wrong adjustment, but we can't use the by-value - // pointer because it's off by a level of abstraction. - // - // The current solution is to dump the adjusted pointer into an - // alloca, which breaks language semantics (because changing the - // pointer doesn't change the exception) but at least works. - // The better solution would be to filter out non-exact matches - // and rethrow them, but this is tricky because the rethrow - // really needs to be catchable by other sites at this landing - // pad. The best solution is to fix the personality function. - } else { - // Pull the pointer for the reference type off. - llvm::Type *PtrTy = - cast<llvm::PointerType>(LLVMCatchTy)->getElementType(); - - // Create the temporary and write the adjusted pointer into it. 
- llvm::Value *ExnPtrTmp = CGF.CreateTempAlloca(PtrTy, "exn.byref.tmp"); - llvm::Value *Casted = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy); - CGF.Builder.CreateStore(Casted, ExnPtrTmp); - - // Bind the reference to the temporary. - AdjustedExn = ExnPtrTmp; - } - } - - llvm::Value *ExnCast = - CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.byref"); - CGF.Builder.CreateStore(ExnCast, ParamAddr); - return; - } - - // Scalars and complexes. - TypeEvaluationKind TEK = CGF.getEvaluationKind(CatchType); - if (TEK != TEK_Aggregate) { - llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, false); - - // If the catch type is a pointer type, __cxa_begin_catch returns - // the pointer by value. - if (CatchType->hasPointerRepresentation()) { - llvm::Value *CastExn = - CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.casted"); - - switch (CatchType.getQualifiers().getObjCLifetime()) { - case Qualifiers::OCL_Strong: - CastExn = CGF.EmitARCRetainNonBlock(CastExn); - // fallthrough - - case Qualifiers::OCL_None: - case Qualifiers::OCL_ExplicitNone: - case Qualifiers::OCL_Autoreleasing: - CGF.Builder.CreateStore(CastExn, ParamAddr); - return; - - case Qualifiers::OCL_Weak: - CGF.EmitARCInitWeak(ParamAddr, CastExn); - return; - } - llvm_unreachable("bad ownership qualifier!"); - } - - // Otherwise, it returns a pointer into the exception object. - - llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok - llvm::Value *Cast = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy); - - LValue srcLV = CGF.MakeNaturalAlignAddrLValue(Cast, CatchType); - LValue destLV = CGF.MakeAddrLValue(ParamAddr, CatchType, - CGF.getContext().getDeclAlign(&CatchParam)); - switch (TEK) { - case TEK_Complex: - CGF.EmitStoreOfComplex(CGF.EmitLoadOfComplex(srcLV, Loc), destLV, - /*init*/ true); - return; - case TEK_Scalar: { - llvm::Value *ExnLoad = CGF.EmitLoadOfScalar(srcLV, Loc); - CGF.EmitStoreOfScalar(ExnLoad, destLV, /*init*/ true); - return; - } - case TEK_Aggregate: - llvm_unreachable("evaluation kind filtered out!"); - } - llvm_unreachable("bad evaluation kind"); - } - - assert(isa<RecordType>(CatchType) && "unexpected catch type!"); - - llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok - - // Check for a copy expression. If we don't have a copy expression, - // that means a trivial copy is okay. - const Expr *copyExpr = CatchParam.getInit(); - if (!copyExpr) { - llvm::Value *rawAdjustedExn = CallBeginCatch(CGF, Exn, true); - llvm::Value *adjustedExn = CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy); - CGF.EmitAggregateCopy(ParamAddr, adjustedExn, CatchType); - return; - } - - // We have to call __cxa_get_exception_ptr to get the adjusted - // pointer before copying. - llvm::CallInst *rawAdjustedExn = - CGF.EmitNounwindRuntimeCall(getGetExceptionPtrFn(CGF.CGM), Exn); - - // Cast that to the appropriate type. - llvm::Value *adjustedExn = CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy); - - // The copy expression is defined in terms of an OpaqueValueExpr. - // Find it and map it to the adjusted expression. - CodeGenFunction::OpaqueValueMapping - opaque(CGF, OpaqueValueExpr::findInCopyConstruct(copyExpr), - CGF.MakeAddrLValue(adjustedExn, CatchParam.getType())); - - // Call the copy ctor in a terminate scope. - CGF.EHStack.pushTerminate(); - - // Perform the copy construction. 
- CharUnits Alignment = CGF.getContext().getDeclAlign(&CatchParam); - CGF.EmitAggExpr(copyExpr, - AggValueSlot::forAddr(ParamAddr, Alignment, Qualifiers(), - AggValueSlot::IsNotDestructed, - AggValueSlot::DoesNotNeedGCBarriers, - AggValueSlot::IsNotAliased)); - - // Leave the terminate scope. - CGF.EHStack.popTerminate(); - - // Undo the opaque value mapping. - opaque.pop(); - - // Finally we can call __cxa_begin_catch. - CallBeginCatch(CGF, Exn, true); -} - -/// Begins a catch statement by initializing the catch variable and -/// calling __cxa_begin_catch. -static void BeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *S) { - // We have to be very careful with the ordering of cleanups here: - // C++ [except.throw]p4: - // The destruction [of the exception temporary] occurs - // immediately after the destruction of the object declared in - // the exception-declaration in the handler. - // - // So the precise ordering is: - // 1. Construct catch variable. - // 2. __cxa_begin_catch - // 3. Enter __cxa_end_catch cleanup - // 4. Enter dtor cleanup - // - // We do this by using a slightly abnormal initialization process. - // Delegation sequence: - // - ExitCXXTryStmt opens a RunCleanupsScope - // - EmitAutoVarAlloca creates the variable and debug info - // - InitCatchParam initializes the variable from the exception - // - CallBeginCatch calls __cxa_begin_catch - // - CallBeginCatch enters the __cxa_end_catch cleanup - // - EmitAutoVarCleanups enters the variable destructor cleanup - // - EmitCXXTryStmt emits the code for the catch body - // - EmitCXXTryStmt close the RunCleanupsScope - - VarDecl *CatchParam = S->getExceptionDecl(); - if (!CatchParam) { - llvm::Value *Exn = CGF.getExceptionFromSlot(); - CallBeginCatch(CGF, Exn, true); - return; - } - - // Emit the local. - CodeGenFunction::AutoVarEmission var = CGF.EmitAutoVarAlloca(*CatchParam); - InitCatchParam(CGF, *CatchParam, var.getObjectAddress(CGF), S->getLocStart()); - CGF.EmitAutoVarCleanups(var); -} - /// Emit the structure of the dispatch block for the given catch scope. /// It is an invariant that the dispatch block already exists. static void emitCatchDispatchBlock(CodeGenFunction &CGF, @@ -1252,11 +955,10 @@ void CodeGenFunction::ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) { RunCleanupsScope CatchScope(*this); // Initialize the catch variable and set up the cleanups. - BeginCatch(*this, C); + CGM.getCXXABI().emitBeginCatch(*this, C); // Emit the PGO counter increment. - RegionCounter CatchCnt = getPGORegionCounter(C); - CatchCnt.beginRegion(Builder); + incrementProfileCounter(C); // Perform the body of the catch. EmitStmt(C->getHandlerBlock()); @@ -1284,9 +986,8 @@ void CodeGenFunction::ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) { Builder.CreateBr(ContBB); } - RegionCounter ContCnt = getPGORegionCounter(&S); EmitBlock(ContBB); - ContCnt.beginRegion(Builder); + incrementProfileCounter(&S); } namespace { @@ -1480,68 +1181,6 @@ void CodeGenFunction::FinallyInfo::exit(CodeGenFunction &CGF) { CGF.PopCleanupBlock(); } -/// In a terminate landing pad, should we use __clang__call_terminate -/// or just a naked call to std::terminate? -/// -/// __clang_call_terminate calls __cxa_begin_catch, which then allows -/// std::terminate to usefully report something about the -/// violating exception. -static bool useClangCallTerminate(CodeGenModule &CGM) { - // Only do this for Itanium-family ABIs in C++ mode. 
- return (CGM.getLangOpts().CPlusPlus && - CGM.getTarget().getCXXABI().isItaniumFamily()); -} - -/// Get or define the following function: -/// void @__clang_call_terminate(i8* %exn) nounwind noreturn -/// This code is used only in C++. -static llvm::Constant *getClangCallTerminateFn(CodeGenModule &CGM) { - llvm::FunctionType *fnTy = - llvm::FunctionType::get(CGM.VoidTy, CGM.Int8PtrTy, /*IsVarArgs=*/false); - llvm::Constant *fnRef = - CGM.CreateRuntimeFunction(fnTy, "__clang_call_terminate"); - - llvm::Function *fn = dyn_cast<llvm::Function>(fnRef); - if (fn && fn->empty()) { - fn->setDoesNotThrow(); - fn->setDoesNotReturn(); - - // What we really want is to massively penalize inlining without - // forbidding it completely. The difference between that and - // 'noinline' is negligible. - fn->addFnAttr(llvm::Attribute::NoInline); - - // Allow this function to be shared across translation units, but - // we don't want it to turn into an exported symbol. - fn->setLinkage(llvm::Function::LinkOnceODRLinkage); - fn->setVisibility(llvm::Function::HiddenVisibility); - - // Set up the function. - llvm::BasicBlock *entry = - llvm::BasicBlock::Create(CGM.getLLVMContext(), "", fn); - CGBuilderTy builder(entry); - - // Pull the exception pointer out of the parameter list. - llvm::Value *exn = &*fn->arg_begin(); - - // Call __cxa_begin_catch(exn). - llvm::CallInst *catchCall = builder.CreateCall(getBeginCatchFn(CGM), exn); - catchCall->setDoesNotThrow(); - catchCall->setCallingConv(CGM.getRuntimeCC()); - - // Call std::terminate(). - llvm::CallInst *termCall = builder.CreateCall(getTerminateFn(CGM)); - termCall->setDoesNotThrow(); - termCall->setDoesNotReturn(); - termCall->setCallingConv(CGM.getRuntimeCC()); - - // std::terminate cannot return. - builder.CreateUnreachable(); - } - - return fnRef; -} - llvm::BasicBlock *CodeGenFunction::getTerminateLandingPad() { if (TerminateLandingPad) return TerminateLandingPad; @@ -1553,20 +1192,17 @@ llvm::BasicBlock *CodeGenFunction::getTerminateLandingPad() { Builder.SetInsertPoint(TerminateLandingPad); // Tell the backend that this is a landing pad. - const EHPersonality &Personality = EHPersonality::get(CGM); + const EHPersonality &Personality = EHPersonality::get(*this); llvm::LandingPadInst *LPadInst = Builder.CreateLandingPad(llvm::StructType::get(Int8PtrTy, Int32Ty, nullptr), getOpaquePersonalityFn(CGM, Personality), 0); LPadInst->addClause(getCatchAllValue(*this)); - llvm::CallInst *terminateCall; - if (useClangCallTerminate(CGM)) { - // Extract out the exception pointer. - llvm::Value *exn = Builder.CreateExtractValue(LPadInst, 0); - terminateCall = EmitNounwindRuntimeCall(getClangCallTerminateFn(CGM), exn); - } else { - terminateCall = EmitNounwindRuntimeCall(getTerminateFn(CGM)); - } + llvm::Value *Exn = 0; + if (getLangOpts().CPlusPlus) + Exn = Builder.CreateExtractValue(LPadInst, 0); + llvm::CallInst *terminateCall = + CGM.getCXXABI().emitTerminateForUnexpectedException(*this, Exn); terminateCall->setDoesNotReturn(); Builder.CreateUnreachable(); @@ -1586,14 +1222,11 @@ llvm::BasicBlock *CodeGenFunction::getTerminateHandler() { // end of the function by FinishFunction. TerminateHandler = createBasicBlock("terminate.handler"); Builder.SetInsertPoint(TerminateHandler); - llvm::CallInst *terminateCall; - if (useClangCallTerminate(CGM)) { - // Load the exception pointer. 
- llvm::Value *exn = getExceptionFromSlot(); - terminateCall = EmitNounwindRuntimeCall(getClangCallTerminateFn(CGM), exn); - } else { - terminateCall = EmitNounwindRuntimeCall(getTerminateFn(CGM)); - } + llvm::Value *Exn = 0; + if (getLangOpts().CPlusPlus) + Exn = getExceptionFromSlot(); + llvm::CallInst *terminateCall = + CGM.getCXXABI().emitTerminateForUnexpectedException(*this, Exn); terminateCall->setDoesNotReturn(); Builder.CreateUnreachable(); @@ -1612,15 +1245,14 @@ llvm::BasicBlock *CodeGenFunction::getEHResumeBlock(bool isCleanup) { EHResumeBlock = createBasicBlock("eh.resume"); Builder.SetInsertPoint(EHResumeBlock); - const EHPersonality &Personality = EHPersonality::get(CGM); + const EHPersonality &Personality = EHPersonality::get(*this); // This can always be a call because we necessarily didn't find // anything on the EH stack which needs our help. const char *RethrowName = Personality.CatchallRethrowFn; if (RethrowName != nullptr && !isCleanup) { EmitRuntimeCall(getCatchallRethrowFn(CGM, RethrowName), - getExceptionFromSlot()) - ->setDoesNotReturn(); + getExceptionFromSlot())->setDoesNotReturn(); Builder.CreateUnreachable(); Builder.restoreIP(SavedIP); return EHResumeBlock; @@ -1642,9 +1274,433 @@ llvm::BasicBlock *CodeGenFunction::getEHResumeBlock(bool isCleanup) { } void CodeGenFunction::EmitSEHTryStmt(const SEHTryStmt &S) { - CGM.ErrorUnsupported(&S, "SEH __try"); + // FIXME: Implement SEH on other architectures. + const llvm::Triple &T = CGM.getTarget().getTriple(); + if (T.getArch() != llvm::Triple::x86_64 || + !T.isKnownWindowsMSVCEnvironment()) { + ErrorUnsupported(&S, "__try statement"); + return; + } + + EnterSEHTryStmt(S); + { + JumpDest TryExit = getJumpDestInCurrentScope("__try.__leave"); + + SEHTryEpilogueStack.push_back(&TryExit); + EmitStmt(S.getTryBlock()); + SEHTryEpilogueStack.pop_back(); + + if (!TryExit.getBlock()->use_empty()) + EmitBlock(TryExit.getBlock(), /*IsFinished=*/true); + else + delete TryExit.getBlock(); + } + ExitSEHTryStmt(S); +} + +namespace { +struct PerformSEHFinally : EHScopeStack::Cleanup { + llvm::Function *OutlinedFinally; + PerformSEHFinally(llvm::Function *OutlinedFinally) + : OutlinedFinally(OutlinedFinally) {} + + void Emit(CodeGenFunction &CGF, Flags F) override { + ASTContext &Context = CGF.getContext(); + QualType ArgTys[2] = {Context.UnsignedCharTy, Context.VoidPtrTy}; + FunctionProtoType::ExtProtoInfo EPI; + const auto *FTP = cast<FunctionType>( + Context.getFunctionType(Context.VoidTy, ArgTys, EPI)); + + CallArgList Args; + llvm::Value *IsForEH = + llvm::ConstantInt::get(CGF.ConvertType(ArgTys[0]), F.isForEHCleanup()); + Args.add(RValue::get(IsForEH), ArgTys[0]); + + CodeGenModule &CGM = CGF.CGM; + llvm::Value *Zero = llvm::ConstantInt::get(CGM.Int32Ty, 0); + llvm::Value *FrameAddr = CGM.getIntrinsic(llvm::Intrinsic::frameaddress); + llvm::Value *FP = CGF.Builder.CreateCall(FrameAddr, Zero); + Args.add(RValue::get(FP), ArgTys[1]); + + const CGFunctionInfo &FnInfo = + CGM.getTypes().arrangeFreeFunctionCall(Args, FTP, /*chainCall=*/false); + CGF.EmitCall(FnInfo, OutlinedFinally, ReturnValueSlot(), Args); + } +}; +} + +namespace { +/// Find all local variable captures in the statement. 
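The CaptureFinder visitor below collects such references; as a concrete case (a sketch assuming an MSVC target with SEH enabled), a __finally block that reads a local of its parent function forces that local to be escaped via llvm.frameescape in the parent and recovered via llvm.framerecover in the outlined helper:

    void external_work(int) {}

    void capture_demo() {
      int x = 0;          // referenced from the __finally block below, so
      __try {             // 'x' is frame-escaped in capture_demo and
        x = 1;            // recovered inside the outlined helper
      } __finally {
        external_work(x); // this DeclRefExpr is what the visitor records
      }
    }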
+struct CaptureFinder : ConstStmtVisitor<CaptureFinder> { + CodeGenFunction &ParentCGF; + const VarDecl *ParentThis; + SmallVector<const VarDecl *, 4> Captures; + CaptureFinder(CodeGenFunction &ParentCGF, const VarDecl *ParentThis) + : ParentCGF(ParentCGF), ParentThis(ParentThis) {} + + void Visit(const Stmt *S) { + // See if this is a capture, then recurse. + ConstStmtVisitor<CaptureFinder>::Visit(S); + for (const Stmt *Child : S->children()) + if (Child) + Visit(Child); + } + + void VisitDeclRefExpr(const DeclRefExpr *E) { + // If this is already a capture, just make sure we capture 'this'. + if (E->refersToEnclosingVariableOrCapture()) { + Captures.push_back(ParentThis); + return; + } + + const auto *D = dyn_cast<VarDecl>(E->getDecl()); + if (D && D->isLocalVarDeclOrParm() && D->hasLocalStorage()) + Captures.push_back(D); + } + + void VisitCXXThisExpr(const CXXThisExpr *E) { + Captures.push_back(ParentThis); + } +}; +} + +void CodeGenFunction::EmitCapturedLocals(CodeGenFunction &ParentCGF, + const Stmt *OutlinedStmt, + llvm::Value *ParentFP) { + // Find all captures in the Stmt. + CaptureFinder Finder(ParentCGF, ParentCGF.CXXABIThisDecl); + Finder.Visit(OutlinedStmt); + + // Typically there are no captures and we can exit early. + if (Finder.Captures.empty()) + return; + + // Prepare the first two arguments to llvm.framerecover. + llvm::Function *FrameRecoverFn = llvm::Intrinsic::getDeclaration( + &CGM.getModule(), llvm::Intrinsic::framerecover); + llvm::Constant *ParentI8Fn = + llvm::ConstantExpr::getBitCast(ParentCGF.CurFn, Int8PtrTy); + + // Create llvm.framerecover calls for all captures. + for (const VarDecl *VD : Finder.Captures) { + if (isa<ImplicitParamDecl>(VD)) { + CGM.ErrorUnsupported(VD, "'this' captured by SEH"); + CXXThisValue = llvm::UndefValue::get(ConvertTypeForMem(VD->getType())); + continue; + } + if (VD->getType()->isVariablyModifiedType()) { + CGM.ErrorUnsupported(VD, "VLA captured by SEH"); + continue; + } + assert((isa<ImplicitParamDecl>(VD) || VD->isLocalVarDeclOrParm()) && + "captured non-local variable"); + + // If this decl hasn't been declared yet, it will be declared in the + // OutlinedStmt. + auto I = ParentCGF.LocalDeclMap.find(VD); + if (I == ParentCGF.LocalDeclMap.end()) + continue; + llvm::Value *ParentVar = I->second; + + llvm::CallInst *RecoverCall = nullptr; + CGBuilderTy Builder(AllocaInsertPt); + if (auto *ParentAlloca = dyn_cast<llvm::AllocaInst>(ParentVar)) { + // Mark the variable escaped if nobody else referenced it and compute the + // frameescape index. + auto InsertPair = + ParentCGF.EscapedLocals.insert(std::make_pair(ParentAlloca, -1)); + if (InsertPair.second) + InsertPair.first->second = ParentCGF.EscapedLocals.size() - 1; + int FrameEscapeIdx = InsertPair.first->second; + // call i8* @llvm.framerecover(i8* bitcast(@parentFn), i8* %fp, i32 N) + RecoverCall = Builder.CreateCall( + FrameRecoverFn, {ParentI8Fn, ParentFP, + llvm::ConstantInt::get(Int32Ty, FrameEscapeIdx)}); + + } else { + // If the parent didn't have an alloca, we're doing some nested outlining. + // Just clone the existing framerecover call, but tweak the FP argument to + // use our FP value. All other arguments are constants. 
+ auto *ParentRecover = + cast<llvm::IntrinsicInst>(ParentVar->stripPointerCasts()); + assert(ParentRecover->getIntrinsicID() == llvm::Intrinsic::framerecover && + "expected alloca or framerecover in parent LocalDeclMap"); + RecoverCall = cast<llvm::CallInst>(ParentRecover->clone()); + RecoverCall->setArgOperand(1, ParentFP); + RecoverCall->insertBefore(AllocaInsertPt); + } + + // Bitcast the variable, rename it, and insert it in the local decl map. + llvm::Value *ChildVar = + Builder.CreateBitCast(RecoverCall, ParentVar->getType()); + ChildVar->setName(ParentVar->getName()); + LocalDeclMap[VD] = ChildVar; + } +} + +/// Arrange a function prototype that can be called by Windows exception +/// handling personalities. On Win64, the prototype looks like: +/// RetTy func(void *EHPtrs, void *ParentFP); +void CodeGenFunction::startOutlinedSEHHelper(CodeGenFunction &ParentCGF, + StringRef Name, QualType RetTy, + FunctionArgList &Args, + const Stmt *OutlinedStmt) { + llvm::Function *ParentFn = ParentCGF.CurFn; + const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeFreeFunctionDeclaration( + RetTy, Args, FunctionType::ExtInfo(), /*isVariadic=*/false); + + llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo); + llvm::Function *Fn = llvm::Function::Create( + FnTy, llvm::GlobalValue::InternalLinkage, Name.str(), &CGM.getModule()); + // The filter is either in the same comdat as the function, or it's internal. + if (llvm::Comdat *C = ParentFn->getComdat()) { + Fn->setComdat(C); + } else if (ParentFn->hasWeakLinkage() || ParentFn->hasLinkOnceLinkage()) { + llvm::Comdat *C = CGM.getModule().getOrInsertComdat(ParentFn->getName()); + ParentFn->setComdat(C); + Fn->setComdat(C); + } else { + Fn->setLinkage(llvm::GlobalValue::InternalLinkage); + } + + IsOutlinedSEHHelper = true; + + StartFunction(GlobalDecl(), RetTy, Fn, FnInfo, Args, + OutlinedStmt->getLocStart(), OutlinedStmt->getLocStart()); + + CGM.SetLLVMFunctionAttributes(nullptr, FnInfo, CurFn); + + auto AI = Fn->arg_begin(); + ++AI; + EmitCapturedLocals(ParentCGF, OutlinedStmt, &*AI); +} + +/// Create a stub filter function that will ultimately hold the code of the +/// filter expression. The EH preparation passes in LLVM will outline the code +/// from the main function body into this stub. +llvm::Function * +CodeGenFunction::GenerateSEHFilterFunction(CodeGenFunction &ParentCGF, + const SEHExceptStmt &Except) { + const Expr *FilterExpr = Except.getFilterExpr(); + SourceLocation StartLoc = FilterExpr->getLocStart(); + + SEHPointersDecl = ImplicitParamDecl::Create( + getContext(), nullptr, StartLoc, + &getContext().Idents.get("exception_pointers"), getContext().VoidPtrTy); + FunctionArgList Args; + Args.push_back(SEHPointersDecl); + Args.push_back(ImplicitParamDecl::Create( + getContext(), nullptr, StartLoc, + &getContext().Idents.get("frame_pointer"), getContext().VoidPtrTy)); + + // Get the mangled function name. + SmallString<128> Name; + { + llvm::raw_svector_ostream OS(Name); + const Decl *ParentCodeDecl = ParentCGF.CurCodeDecl; + const NamedDecl *Parent = dyn_cast_or_null<NamedDecl>(ParentCodeDecl); + assert(Parent && "FIXME: handle unnamed decls (lambdas, blocks) with SEH"); + CGM.getCXXABI().getMangleContext().mangleSEHFilterExpression(Parent, OS); + } + + startOutlinedSEHHelper(ParentCGF, Name, getContext().LongTy, Args, + FilterExpr); + + // Mark the outlined filter function itself as nounwind and noinline to make + // LLVM's job a little easier. + // FIXME: Remove these restrictions in the future.
+ CurFn->addFnAttr(llvm::Attribute::NoUnwind); + CurFn->addFnAttr(llvm::Attribute::NoInline); + + EmitSEHExceptionCodeSave(); + + // Emit the original filter expression, convert to i32, and return. + llvm::Value *R = EmitScalarExpr(FilterExpr); + R = Builder.CreateIntCast(R, ConvertType(getContext().LongTy), + FilterExpr->getType()->isSignedIntegerType()); + Builder.CreateStore(R, ReturnValue); + + FinishFunction(FilterExpr->getLocEnd()); + + return CurFn; +} + +llvm::Function * +CodeGenFunction::GenerateSEHFinallyFunction(CodeGenFunction &ParentCGF, + const SEHFinallyStmt &Finally) { + const Stmt *FinallyBlock = Finally.getBlock(); + SourceLocation StartLoc = FinallyBlock->getLocStart(); + + FunctionArgList Args; + Args.push_back(ImplicitParamDecl::Create( + getContext(), nullptr, StartLoc, + &getContext().Idents.get("abnormal_termination"), + getContext().UnsignedCharTy)); + Args.push_back(ImplicitParamDecl::Create( + getContext(), nullptr, StartLoc, + &getContext().Idents.get("frame_pointer"), getContext().VoidPtrTy)); + + // Get the mangled function name. + SmallString<128> Name; + { + llvm::raw_svector_ostream OS(Name); + const Decl *ParentCodeDecl = ParentCGF.CurCodeDecl; + const NamedDecl *Parent = dyn_cast_or_null<NamedDecl>(ParentCodeDecl); + assert(Parent && "FIXME: handle unnamed decls (lambdas, blocks) with SEH"); + CGM.getCXXABI().getMangleContext().mangleSEHFinallyBlock(Parent, OS); + } + + startOutlinedSEHHelper(ParentCGF, Name, getContext().VoidTy, Args, + FinallyBlock); + + // Emit the original __finally block body. + EmitStmt(FinallyBlock); + + FinishFunction(FinallyBlock->getLocEnd()); + + return CurFn; +} + +void CodeGenFunction::EmitSEHExceptionCodeSave() { + // Save the exception code in the exception slot to unify exception access in + // the filter function and the landing pad. + // struct EXCEPTION_POINTERS { + // EXCEPTION_RECORD *ExceptionRecord; + // CONTEXT *ContextRecord; + // }; + // void *exn.slot = + // (void *)(uintptr_t)exception_pointers->ExceptionRecord->ExceptionCode; + llvm::Value *Ptrs = Builder.CreateLoad(GetAddrOfLocalVar(SEHPointersDecl)); + llvm::Type *RecordTy = CGM.Int32Ty->getPointerTo(); + llvm::Type *PtrsTy = llvm::StructType::get(RecordTy, CGM.VoidPtrTy, nullptr); + Ptrs = Builder.CreateBitCast(Ptrs, PtrsTy->getPointerTo()); + llvm::Value *Rec = Builder.CreateStructGEP(PtrsTy, Ptrs, 0); + Rec = Builder.CreateLoad(Rec); + llvm::Value *Code = Builder.CreateLoad(Rec); + Code = Builder.CreateZExt(Code, CGM.IntPtrTy); + // FIXME: Change landing pads to produce {i32, i32} and make the exception + // slot an i32. + Code = Builder.CreateIntToPtr(Code, CGM.VoidPtrTy); + Builder.CreateStore(Code, getExceptionSlot()); +} + +llvm::Value *CodeGenFunction::EmitSEHExceptionInfo() { + // Sema should diagnose calling this builtin outside of a filter context, but + // don't crash if we screw up. + if (!SEHPointersDecl) + return llvm::UndefValue::get(Int8PtrTy); + return Builder.CreateLoad(GetAddrOfLocalVar(SEHPointersDecl)); +} + +llvm::Value *CodeGenFunction::EmitSEHExceptionCode() { + // If we're in a landing pad or filter function, the exception slot contains + // the code. + assert(ExceptionSlot); + llvm::Value *Code = + Builder.CreatePtrToInt(getExceptionFromSlot(), CGM.IntPtrTy); + return Builder.CreateTrunc(Code, CGM.Int32Ty); +} + +llvm::Value *CodeGenFunction::EmitSEHAbnormalTermination() { + // Abnormal termination is just the first parameter to the outlined finally + // helper.
+ auto AI = CurFn->arg_begin(); + return Builder.CreateZExt(&*AI, Int32Ty); +} + +void CodeGenFunction::EnterSEHTryStmt(const SEHTryStmt &S) { + CodeGenFunction HelperCGF(CGM, /*suppressNewContext=*/true); + if (const SEHFinallyStmt *Finally = S.getFinallyHandler()) { + // Push a cleanup for __finally blocks. + llvm::Function *FinallyFunc = + HelperCGF.GenerateSEHFinallyFunction(*this, *Finally); + EHStack.pushCleanup<PerformSEHFinally>(NormalAndEHCleanup, FinallyFunc); + return; + } + + // Otherwise, we must have an __except block. + const SEHExceptStmt *Except = S.getExceptHandler(); + assert(Except); + EHCatchScope *CatchScope = EHStack.pushCatch(1); + + // If the filter is known to evaluate to 1, then we can use the clause "catch + // i8* null". + llvm::Constant *C = + CGM.EmitConstantExpr(Except->getFilterExpr(), getContext().IntTy, this); + if (C && C->isOneValue()) { + CatchScope->setCatchAllHandler(0, createBasicBlock("__except")); + return; + } + + // In general, we have to emit an outlined filter function. Use the function + // in place of the RTTI typeinfo global that C++ EH uses. + llvm::Function *FilterFunc = + HelperCGF.GenerateSEHFilterFunction(*this, *Except); + llvm::Constant *OpaqueFunc = + llvm::ConstantExpr::getBitCast(FilterFunc, Int8PtrTy); + CatchScope->setHandler(0, OpaqueFunc, createBasicBlock("__except")); +} + +void CodeGenFunction::ExitSEHTryStmt(const SEHTryStmt &S) { + // Just pop the cleanup if it's a __finally block. + if (S.getFinallyHandler()) { + PopCleanupBlock(); + return; + } + + // Otherwise, we must have an __except block. + const SEHExceptStmt *Except = S.getExceptHandler(); + assert(Except && "__try must have __finally xor __except"); + EHCatchScope &CatchScope = cast<EHCatchScope>(*EHStack.begin()); + + // Don't emit the __except block if the __try block lacked invokes. + // TODO: Model unwind edges from instructions, either with iload / istore or + // a try body function. + if (!CatchScope.hasEHBranches()) { + CatchScope.clearHandlerBlocks(); + EHStack.popCatch(); + return; + } + + // The fall-through block. + llvm::BasicBlock *ContBB = createBasicBlock("__try.cont"); + + // We just emitted the body of the __try; jump to the continue block. + if (HaveInsertPoint()) + Builder.CreateBr(ContBB); + + // Check if our filter function returned true. + emitCatchDispatchBlock(*this, CatchScope); + + // Grab the block before we pop the handler. + llvm::BasicBlock *ExceptBB = CatchScope.getHandler(0).Block; + EHStack.popCatch(); + + EmitBlockAfterUses(ExceptBB); + + // Emit the __except body. + EmitStmt(Except->getBlock()); + + if (HaveInsertPoint()) + Builder.CreateBr(ContBB); + + EmitBlock(ContBB); } void CodeGenFunction::EmitSEHLeaveStmt(const SEHLeaveStmt &S) { - CGM.ErrorUnsupported(&S, "SEH __leave"); + // If this code is reachable then emit a stop point (if generating + // debug info). We have to do this ourselves because we are on the + // "simple" statement path. + if (HaveInsertPoint()) + EmitStopPoint(&S); + + // This must be a __leave from a __finally block, which we warn on and is UB. + // Just emit unreachable. 
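As usage context for EmitSEHLeaveStmt below (a sketch; assumes an x86_64-windows-msvc target, per the guard in EmitSEHTryStmt above): a __leave inside the __try body branches to the "__try.__leave" jump dest pushed by EmitSEHTryStmt, while a __leave written inside the __finally block reaches codegen with an empty SEHTryEpilogueStack, which is the UB case handled below with CreateUnreachable.

    int leave_demo(int *p) {
      __try {
        if (!p)
          __leave; // well-formed: jumps past the rest of the __try body
        *p = 42;
      } __finally {
        // a __leave here would be the isSEHTryScope() failure case below
      }
      return p ? 1 : 0;
    }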
+ if (!isSEHTryScope()) { + Builder.CreateUnreachable(); + Builder.ClearInsertionPoint(); + return; + } + + EmitBranchThroughCleanup(*SEHTryEpilogueStack.back()); } diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExpr.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExpr.cpp index ce7679c..1ed45a3 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CGExpr.cpp +++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExpr.cpp @@ -31,6 +31,7 @@ #include "llvm/IR/LLVMContext.h" #include "llvm/IR/MDBuilder.h" #include "llvm/Support/ConvertUTF.h" +#include "llvm/Support/MathExtras.h" using namespace clang; using namespace CodeGen; @@ -300,9 +301,26 @@ createReferenceTemporary(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M, const Expr *Inner) { switch (M->getStorageDuration()) { case SD_FullExpression: - case SD_Automatic: - return CGF.CreateMemTemp(Inner->getType(), "ref.tmp"); - + case SD_Automatic: { + // If we have a constant temporary array or record, try to promote it into + // a constant global, following the same rules by which a normal constant + // would be promoted. This is easier on the optimizer and generally emits + // fewer instructions. + QualType Ty = Inner->getType(); + if (CGF.CGM.getCodeGenOpts().MergeAllConstants && + (Ty->isArrayType() || Ty->isRecordType()) && + CGF.CGM.isTypeConstant(Ty, true)) + if (llvm::Constant *Init = CGF.CGM.EmitConstantExpr(Inner, Ty, &CGF)) { + auto *GV = new llvm::GlobalVariable( + CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true, + llvm::GlobalValue::PrivateLinkage, Init, ".ref.tmp"); + GV->setAlignment( + CGF.getContext().getTypeAlignInChars(Ty).getQuantity()); + // FIXME: Should we put the new global into a COMDAT? + return GV; + } + return CGF.CreateMemTemp(Ty, "ref.tmp"); + } case SD_Thread: case SD_Static: return CGF.CGM.GetAddrOfGlobalTemporary(M, Inner); @@ -324,14 +342,15 @@ EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) { M->getType().getObjCLifetime() != Qualifiers::OCL_None && M->getType().getObjCLifetime() != Qualifiers::OCL_ExplicitNone) { llvm::Value *Object = createReferenceTemporary(*this, M, E); - LValue RefTempDst = MakeAddrLValue(Object, M->getType()); - if (auto *Var = dyn_cast<llvm::GlobalVariable>(Object)) { + Object = llvm::ConstantExpr::getBitCast( + Var, ConvertTypeForMem(E->getType())->getPointerTo()); // We should not have emitted the initializer for this temporary as a // constant. assert(!Var->hasInitializer()); Var->setInitializer(CGM.EmitNullConstant(E->getType())); } + LValue RefTempDst = MakeAddrLValue(Object, M->getType()); switch (getEvaluationKind(E->getType())) { default: llvm_unreachable("expected scalar or aggregate expression"); @@ -370,8 +389,11 @@ EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) { // Create and initialize the reference temporary. llvm::Value *Object = createReferenceTemporary(*this, M, E); if (auto *Var = dyn_cast<llvm::GlobalVariable>(Object)) { - // If the temporary is a global and has a constant initializer, we may - // have already initialized it. + Object = llvm::ConstantExpr::getBitCast( + Var, ConvertTypeForMem(E->getType())->getPointerTo()); + // If the temporary is a global and has a constant initializer or is a + // constant temporary that we promoted to a global, we may have already + // initialized it.
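An example of the promotion added above (a sketch; it applies when constant merging, CodeGenOpts.MergeAllConstants, is enabled and the type is constant): the lifetime-extended temporary array here can be emitted as a private constant ".ref.tmp" global instead of being rebuilt on the stack on every call.

    int sum_demo() {
      // C++11: binds a reference to a materialized constant temporary array
      // (SD_Automatic storage duration with extended lifetime)
      const int (&coeffs)[4] = {1, 2, 3, 4};
      int s = 0;
      for (unsigned i = 0; i != 4; ++i)
        s += coeffs[i];
      return s;
    }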
if (!Var->hasInitializer()) { Var->setInitializer(CGM.EmitNullConstant(E->getType())); EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true); @@ -478,7 +500,7 @@ void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, SanitizerScope SanScope(this); - SmallVector<std::pair<llvm::Value *, SanitizerKind>, 3> Checks; + SmallVector<std::pair<llvm::Value *, SanitizerMask>, 3> Checks; llvm::BasicBlock *Done = nullptr; bool AllowNullPointers = TCK == TCK_DowncastPointer || TCK == TCK_Upcast || @@ -513,7 +535,7 @@ void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, llvm::Value *Min = Builder.getFalse(); llvm::Value *CastAddr = Builder.CreateBitCast(Address, Int8PtrTy); llvm::Value *LargeEnough = - Builder.CreateICmpUGE(Builder.CreateCall2(F, CastAddr, Min), + Builder.CreateICmpUGE(Builder.CreateCall(F, {CastAddr, Min}), llvm::ConstantInt::get(IntPtrTy, Size)); Checks.push_back(std::make_pair(LargeEnough, SanitizerKind::ObjectSize)); } @@ -807,6 +829,7 @@ LValue CodeGenFunction::EmitCheckedLValue(const Expr *E, TypeCheckKind TCK) { /// length type, this is not possible. /// LValue CodeGenFunction::EmitLValue(const Expr *E) { + ApplyDebugLocation DL(*this, E); switch (E->getStmtClass()) { default: return EmitUnsupportedLValue(E, "l-value expression"); @@ -819,10 +842,14 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) { return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E)); case Expr::BinaryOperatorClass: return EmitBinaryOperatorLValue(cast<BinaryOperator>(E)); - case Expr::CompoundAssignOperatorClass: - if (!E->getType()->isAnyComplexType()) + case Expr::CompoundAssignOperatorClass: { + QualType Ty = E->getType(); + if (const AtomicType *AT = Ty->getAs<AtomicType>()) + Ty = AT->getValueType(); + if (!Ty->isAnyComplexType()) return EmitCompoundAssignmentLValue(cast<CompoundAssignOperator>(E)); return EmitComplexCompoundAssignmentLValue(cast<CompoundAssignOperator>(E)); + } case Expr::CallExprClass: case Expr::CXXMemberCallExprClass: case Expr::CXXOperatorCallExprClass: @@ -1135,7 +1162,7 @@ llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile, } // Atomic operations have to be done on integral types. - if (Ty->isAtomicType()) { + if (Ty->isAtomicType() || typeIsSuitableForInlineAtomic(Ty, Volatile)) { LValue lvalue = LValue::MakeAddr(Addr, Ty, CharUnits::fromQuantity(Alignment), getContext(), TBAAInfo); @@ -1178,7 +1205,7 @@ llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile, EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(Ty) }; - SanitizerKind Kind = NeedsEnumCheck ? SanitizerKind::Enum : SanitizerKind::Bool; + SanitizerMask Kind = NeedsEnumCheck ? 
SanitizerKind::Enum : SanitizerKind::Bool; EmitCheck(std::make_pair(Check, Kind), "load_invalid_value", StaticArgs, EmitCheckValue(Load)); } @@ -1254,7 +1281,8 @@ void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr, Value = EmitToMemory(Value, Ty); - if (Ty->isAtomicType()) { + if (Ty->isAtomicType() || + (!isInit && typeIsSuitableForInlineAtomic(Ty, Volatile))) { EmitAtomicStore(RValue::get(Value), LValue::MakeAddr(Addr, Ty, CharUnits::fromQuantity(Alignment), @@ -1692,8 +1720,8 @@ void CodeGenFunction::EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst) { llvm::Value *Value = Src.getScalarVal(); if (OrigTy->isPointerTy()) Value = Builder.CreatePtrToInt(Value, Ty); - Builder.CreateCall2(F, llvm::MetadataAsValue::get(Ty->getContext(), RegName), - Value); + Builder.CreateCall( + F, {llvm::MetadataAsValue::get(Ty->getContext(), RegName), Value}); } // setObjCGCLValueClass - sets class of the lvalue for the purpose of @@ -1807,7 +1835,7 @@ EmitBitCastOfLValueToProperType(CodeGenFunction &CGF, static LValue EmitThreadPrivateVarDeclLValue( CodeGenFunction &CGF, const VarDecl *VD, QualType T, llvm::Value *V, llvm::Type *RealVarTy, CharUnits Alignment, SourceLocation Loc) { - V = CGF.CGM.getOpenMPRuntime().getOMPAddrOfThreadPrivate(CGF, VD, V, Loc); + V = CGF.CGM.getOpenMPRuntime().getAddrOfThreadPrivate(CGF, VD, V, Loc); V = EmitBitCastOfLValueToProperType(CGF, V, RealVarTy); return CGF.MakeAddrLValue(V, T, Alignment); } @@ -2050,9 +2078,8 @@ LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) { assert(E->getSubExpr()->getType()->isAnyComplexType()); unsigned Idx = E->getOpcode() == UO_Imag; - return MakeAddrLValue(Builder.CreateStructGEP(LV.getAddress(), - Idx, "idx"), - ExprTy); + return MakeAddrLValue( + Builder.CreateStructGEP(nullptr, LV.getAddress(), Idx, "idx"), ExprTy); } case UO_PreInc: case UO_PreDec: { @@ -2217,7 +2244,8 @@ enum class CheckRecoverableKind { }; } -static CheckRecoverableKind getRecoverableKind(SanitizerKind Kind) { +static CheckRecoverableKind getRecoverableKind(SanitizerMask Kind) { + assert(llvm::countPopulation(Kind) == 1); switch (Kind) { case SanitizerKind::Vptr: return CheckRecoverableKind::AlwaysRecoverable; @@ -2264,7 +2292,7 @@ static void emitCheckHandlerCall(CodeGenFunction &CGF, } void CodeGenFunction::EmitCheck( - ArrayRef<std::pair<llvm::Value *, SanitizerKind>> Checked, + ArrayRef<std::pair<llvm::Value *, SanitizerMask>> Checked, StringRef CheckName, ArrayRef<llvm::Constant *> StaticArgs, ArrayRef<llvm::Value *> DynamicArgs) { assert(IsSanitizerScope); @@ -2376,7 +2404,7 @@ void CodeGenFunction::EmitTrapCheck(llvm::Value *Checked) { Builder.CreateCondBr(Checked, Cont, TrapBB); EmitBlock(TrapBB); llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::trap); - llvm::CallInst *TrapCall = Builder.CreateCall(F); + llvm::CallInst *TrapCall = Builder.CreateCall(F, {}); TrapCall->setDoesNotReturn(); TrapCall->setDoesNotThrow(); Builder.CreateUnreachable(); @@ -2648,7 +2676,7 @@ LValue CodeGenFunction::EmitLValueForField(LValue base, unsigned Idx = RL.getLLVMFieldNo(field); if (Idx != 0) // For structs, we GEP to the field that the record layout suggests. - Addr = Builder.CreateStructGEP(Addr, Idx, field->getName()); + Addr = Builder.CreateStructGEP(nullptr, Addr, Idx, field->getName()); // Get the access type. 
llvm::Type *PtrTy = llvm::Type::getIntNPtrTy( getLLVMContext(), Info.StorageSize, @@ -2683,7 +2711,7 @@ LValue CodeGenFunction::EmitLValueForField(LValue base, } else { // For structs, we GEP to the field that the record layout suggests. unsigned idx = CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field); - addr = Builder.CreateStructGEP(addr, idx, field->getName()); + addr = Builder.CreateStructGEP(nullptr, addr, idx, field->getName()); // If this is a reference field, load the reference right now. if (const ReferenceType *refType = type->getAs<ReferenceType>()) { @@ -2762,7 +2790,7 @@ CodeGenFunction::EmitLValueForFieldInitialization(LValue Base, const CGRecordLayout &RL = CGM.getTypes().getCGRecordLayout(Field->getParent()); unsigned idx = RL.getLLVMFieldNo(Field); - llvm::Value *V = Builder.CreateStructGEP(Base.getAddress(), idx); + llvm::Value *V = Builder.CreateStructGEP(nullptr, Base.getAddress(), idx); assert(!FieldType.getObjCGCAttr() && "fields cannot have GC attrs"); // Make sure that the address is pointing to the right type. This is critical @@ -2834,7 +2862,6 @@ EmitConditionalOperatorLValue(const AbstractConditionalOperator *expr) { } OpaqueValueMapping binding(*this, expr); - RegionCounter Cnt = getPGORegionCounter(expr); const Expr *condExpr = expr->getCond(); bool CondExprBool; @@ -2845,7 +2872,7 @@ EmitConditionalOperatorLValue(const AbstractConditionalOperator *expr) { if (!ContainsLabel(dead)) { // If the true case is live, we need to track its region. if (CondExprBool) - Cnt.beginRegion(Builder); + incrementProfileCounter(expr); return EmitLValue(live); } } @@ -2855,11 +2882,11 @@ EmitConditionalOperatorLValue(const AbstractConditionalOperator *expr) { llvm::BasicBlock *contBlock = createBasicBlock("cond.end"); ConditionalEvaluation eval(*this); - EmitBranchOnBoolExpr(condExpr, lhsBlock, rhsBlock, Cnt.getCount()); + EmitBranchOnBoolExpr(condExpr, lhsBlock, rhsBlock, getProfileCount(expr)); // Any temporaries created here are conditional. EmitBlock(lhsBlock); - Cnt.beginRegion(Builder); + incrementProfileCounter(expr); eval.begin(*this); Optional<LValue> lhs = EmitLValueOrThrowExpression(*this, expr->getTrueExpr()); @@ -3007,6 +3034,9 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) { EmitTypeCheck(TCK_DowncastReference, E->getExprLoc(), Derived, E->getType()); + if (SanOpts.has(SanitizerKind::CFIDerivedCast)) + EmitVTablePtrCheckForCast(E->getType(), Derived, /*MayBeNull=*/false); + return MakeAddrLValue(Derived, E->getType()); } case CK_LValueBitCast: { @@ -3016,6 +3046,10 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) { LValue LV = EmitLValue(E->getSubExpr()); llvm::Value *V = Builder.CreateBitCast(LV.getAddress(), ConvertType(CE->getTypeAsWritten())); + + if (SanOpts.has(SanitizerKind::CFIUnrelatedCast)) + EmitVTablePtrCheckForCast(E->getType(), V, /*MayBeNull=*/false); + return MakeAddrLValue(V, E->getType()); } case CK_ObjCObjectLValueCast: { @@ -3059,16 +3093,6 @@ RValue CodeGenFunction::EmitRValueForField(LValue LV, RValue CodeGenFunction::EmitCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue) { - // Force column info to be generated so we can differentiate - // multiple call sites on the same line in the debug info. - // FIXME: This is insufficient. Two calls coming from the same macro - // expansion will still get the same line/column and break debug info. It's - // possible that LLVM can be fixed to not rely on this uniqueness, at which - // point this workaround can be removed. 
- ApplyDebugLocation DL(*this, E->getLocStart(), - E->getDirectCallee() && - E->getDirectCallee()->isInlineSpecified()); - // Builtins never have block type. if (E->getCallee()->getType()->isBlockPointerType()) return EmitBlockCallExpr(E, ReturnValue); @@ -3202,7 +3226,7 @@ LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) { if (!RV.isScalar()) return MakeAddrLValue(RV.getAggregateAddr(), E->getType()); - assert(E->getCallReturnType()->isReferenceType() && + assert(E->getCallReturnType(getContext())->isReferenceType() && "Can't have a scalar return unless the return type is a " "reference type!"); @@ -3328,16 +3352,6 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee, const auto *FnType = cast<FunctionType>(cast<PointerType>(CalleeType)->getPointeeType()); - // Force column info to differentiate multiple inlined call sites on - // the same line, analoguous to EmitCallExpr. - // FIXME: This is insufficient. Two calls coming from the same macro expansion - // will still get the same line/column and break debug info. It's possible - // that LLVM can be fixed to not rely on this uniqueness, at which point this - // workaround can be removed. - bool ForceColumnInfo = false; - if (const FunctionDecl* FD = dyn_cast_or_null<const FunctionDecl>(TargetDecl)) - ForceColumnInfo = FD->isInlineSpecified(); - if (getLangOpts().CPlusPlus && SanOpts.has(SanitizerKind::Function) && (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) { if (llvm::Constant *PrefixSig = @@ -3355,7 +3369,7 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee, llvm::Value *CalleePrefixStruct = Builder.CreateBitCast( Callee, llvm::PointerType::getUnqual(PrefixStructTy)); llvm::Value *CalleeSigPtr = - Builder.CreateConstGEP2_32(CalleePrefixStruct, 0, 0); + Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, 0, 0); llvm::Value *CalleeSig = Builder.CreateLoad(CalleeSigPtr); llvm::Value *CalleeSigMatch = Builder.CreateICmpEQ(CalleeSig, PrefixSig); @@ -3365,7 +3379,7 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee, EmitBlock(TypeCheck); llvm::Value *CalleeRTTIPtr = - Builder.CreateConstGEP2_32(CalleePrefixStruct, 0, 1); + Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, 0, 1); llvm::Value *CalleeRTTI = Builder.CreateLoad(CalleeRTTIPtr); llvm::Value *CalleeRTTIMatch = Builder.CreateICmpEQ(CalleeRTTI, FTRTTIConst); @@ -3386,8 +3400,7 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee, Args.add(RValue::get(Builder.CreateBitCast(Chain, CGM.VoidPtrTy)), CGM.getContext().VoidPtrTy); EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), E->arg_begin(), - E->arg_end(), E->getDirectCallee(), /*ParamsToSkip*/ 0, - ForceColumnInfo); + E->arg_end(), E->getDirectCallee(), /*ParamsToSkip*/ 0); const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeFreeFunctionCall( Args, FnType, /*isChainCall=*/Chain); diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExprAgg.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExprAgg.cpp index 4cf94c0..6fedf0e 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CGExprAgg.cpp +++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExprAgg.cpp @@ -98,6 +98,11 @@ public: // Visitor Methods //===--------------------------------------------------------------------===// + void Visit(Expr *E) { + ApplyDebugLocation DL(CGF, E); + StmtVisitor<AggExprEmitter>::Visit(E); + } + void VisitStmt(Stmt *S) { CGF.ErrorUnsupported(S, "aggregate expression"); } @@ -207,7 +212,7 @@ void 
AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) { LValue LV = CGF.EmitLValue(E); // If the type of the l-value is atomic, then do an atomic load. - if (LV.getType()->isAtomicType()) { + if (LV.getType()->isAtomicType() || CGF.LValueIsSuitableForInlineAtomic(LV)) { CGF.EmitAtomicLoad(LV, E->getExprLoc(), Dest); return; } @@ -579,7 +584,12 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) { } case CK_ToUnion: { - if (Dest.isIgnored()) break; + // Evaluate even if the destination is ignored. + if (Dest.isIgnored()) { + CGF.EmitAnyExpr(E->getSubExpr(), AggValueSlot::ignored(), + /*ignoreResult=*/true); + break; + } // GCC union extension QualType Ty = E->getSubExpr()->getType(); @@ -640,7 +650,7 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) { // Build a GEP to refer to the subobject. llvm::Value *valueAddr = - CGF.Builder.CreateStructGEP(valueDest.getAddr(), 0); + CGF.Builder.CreateStructGEP(nullptr, valueDest.getAddr(), 0); valueDest = AggValueSlot::forAddr(valueAddr, valueDest.getAlignment(), valueDest.getQualifiers(), @@ -661,7 +671,7 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) { CGF.EmitAggExpr(E->getSubExpr(), atomicSlot); llvm::Value *valueAddr = - Builder.CreateStructGEP(atomicSlot.getAddr(), 0); + Builder.CreateStructGEP(nullptr, atomicSlot.getAddr(), 0); RValue rvalue = RValue::getAggregate(valueAddr, atomicSlot.isVolatile()); return EmitFinalDestCopy(valueType, rvalue); } @@ -736,7 +746,7 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) { } void AggExprEmitter::VisitCallExpr(const CallExpr *E) { - if (E->getCallReturnType()->isReferenceType()) { + if (E->getCallReturnType(CGF.getContext())->isReferenceType()) { EmitAggLoadOfLValue(E); return; } @@ -860,7 +870,8 @@ void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) { LValue LHS = CGF.EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store); // That copy is an atomic copy if the LHS is atomic. - if (LHS.getType()->isAtomicType()) { + if (LHS.getType()->isAtomicType() || + CGF.LValueIsSuitableForInlineAtomic(LHS)) { CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false); return; } @@ -877,7 +888,8 @@ void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) { // If we have an atomic type, evaluate into the destination and then // do an atomic copy. - if (LHS.getType()->isAtomicType()) { + if (LHS.getType()->isAtomicType() || + CGF.LValueIsSuitableForInlineAtomic(LHS)) { EnsureDest(E->getRHS()->getType()); Visit(E->getRHS()); CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false); @@ -909,16 +921,16 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) { // Bind the common expression if necessary. CodeGenFunction::OpaqueValueMapping binding(CGF, E); - RegionCounter Cnt = CGF.getPGORegionCounter(E); CodeGenFunction::ConditionalEvaluation eval(CGF); - CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock, Cnt.getCount()); + CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock, + CGF.getProfileCount(E)); // Save whether the destination's lifetime is externally managed. 
bool isExternallyDestructed = Dest.isExternallyDestructed(); eval.begin(CGF); CGF.EmitBlock(LHSBlock); - Cnt.beginRegion(Builder); + CGF.incrementProfileCounter(E); Visit(E->getTrueExpr()); eval.end(CGF); @@ -1408,7 +1420,8 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr, assert((Record->hasTrivialCopyConstructor() || Record->hasTrivialCopyAssignment() || Record->hasTrivialMoveConstructor() || - Record->hasTrivialMoveAssignment()) && + Record->hasTrivialMoveAssignment() || + Record->isUnion()) && "Trying to aggregate-copy a type without a trivial copy/move " "constructor or assignment operator"); // Ignore empty classes in C++. @@ -1439,7 +1452,34 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr, if (alignment.isZero()) alignment = TypeInfo.second; - // FIXME: Handle variable sized types. + llvm::Value *SizeVal = nullptr; + if (TypeInfo.first.isZero()) { + // But note that getTypeInfo returns 0 for a VLA. + if (auto *VAT = dyn_cast_or_null<VariableArrayType>( + getContext().getAsArrayType(Ty))) { + QualType BaseEltTy; + SizeVal = emitArrayLength(VAT, BaseEltTy, DestPtr); + TypeInfo = getContext().getTypeInfoDataSizeInChars(BaseEltTy); + std::pair<CharUnits, CharUnits> LastElementTypeInfo; + if (!isAssignment) + LastElementTypeInfo = getContext().getTypeInfoInChars(BaseEltTy); + assert(!TypeInfo.first.isZero()); + SizeVal = Builder.CreateNUWMul( + SizeVal, + llvm::ConstantInt::get(SizeTy, TypeInfo.first.getQuantity())); + if (!isAssignment) { + SizeVal = Builder.CreateNUWSub( + SizeVal, + llvm::ConstantInt::get(SizeTy, TypeInfo.first.getQuantity())); + SizeVal = Builder.CreateNUWAdd( + SizeVal, llvm::ConstantInt::get( + SizeTy, LastElementTypeInfo.first.getQuantity())); + } + } + } + if (!SizeVal) { + SizeVal = llvm::ConstantInt::get(SizeTy, TypeInfo.first.getQuantity()); + } // FIXME: If we have a volatile struct, the optimizer can remove what might // appear to be `extra' memory ops: @@ -1470,9 +1510,6 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr, } else if (const RecordType *RecordTy = Ty->getAs<RecordType>()) { RecordDecl *Record = RecordTy->getDecl(); if (Record->hasObjectMember()) { - CharUnits size = TypeInfo.first; - llvm::Type *SizeTy = ConvertType(getContext().getSizeType()); - llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size.getQuantity()); CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr, SizeVal); return; @@ -1481,10 +1518,6 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr, QualType BaseType = getContext().getBaseElementType(Ty); if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) { if (RecordTy->getDecl()->hasObjectMember()) { - CharUnits size = TypeInfo.first; - llvm::Type *SizeTy = ConvertType(getContext().getSizeType()); - llvm::Value *SizeVal = - llvm::ConstantInt::get(SizeTy, size.getQuantity()); CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr, SizeVal); return; @@ -1497,9 +1530,6 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr, // the optimizer wishes to expand it in to scalar memory operations. 
llvm::MDNode *TBAAStructTag = CGM.getTBAAStructInfo(Ty); - Builder.CreateMemCpy(DestPtr, SrcPtr, - llvm::ConstantInt::get(IntPtrTy, - TypeInfo.first.getQuantity()), - alignment.getQuantity(), isVolatile, - /*TBAATag=*/nullptr, TBAAStructTag); + Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, alignment.getQuantity(), + isVolatile, /*TBAATag=*/nullptr, TBAAStructTag); } diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExprCXX.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExprCXX.cpp index 6d63b3a..13dfbb3 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CGExprCXX.cpp +++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExprCXX.cpp @@ -173,7 +173,7 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr( This = EmitLValue(Base).getAddress(); - if (MD->isTrivial()) { + if (MD->isTrivial() || (MD->isDefaulted() && MD->getParent()->isUnion())) { if (isa<CXXDestructorDecl>(MD)) return RValue::get(nullptr); if (isa<CXXConstructorDecl>(MD) && cast<CXXConstructorDecl>(MD)->isDefaultConstructor()) @@ -256,6 +256,12 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr( } else if (UseVirtualCall) { Callee = CGM.getCXXABI().getVirtualFunctionPointer(*this, MD, This, Ty); } else { + if (SanOpts.has(SanitizerKind::CFINVCall) && + MD->getParent()->isDynamicClass()) { + llvm::Value *VTable = GetVTablePtr(This, Int8PtrTy); + EmitVTablePtrCheckForCall(MD, VTable); + } + if (getLangOpts().AppleKext && MD->isVirtual() && HasQualifier) Callee = BuildAppleKextVirtualCall(MD, Qualifier, Ty); else if (!DevirtualizedMethod) @@ -684,7 +690,7 @@ static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF, llvm::Value *tsmV = llvm::ConstantInt::get(CGF.SizeTy, typeSizeMultiplier); llvm::Value *result = - CGF.Builder.CreateCall2(umul_with_overflow, size, tsmV); + CGF.Builder.CreateCall(umul_with_overflow, {size, tsmV}); llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1); if (hasOverflow) @@ -723,7 +729,7 @@ static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF, llvm::Value *cookieSizeV = llvm::ConstantInt::get(CGF.SizeTy, cookieSize); llvm::Value *result = - CGF.Builder.CreateCall2(uadd_with_overflow, size, cookieSizeV); + CGF.Builder.CreateCall(uadd_with_overflow, {size, cookieSizeV}); llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1); if (hasOverflow) @@ -778,12 +784,10 @@ static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const Expr *Init, llvm_unreachable("bad evaluation kind"); } -void -CodeGenFunction::EmitNewArrayInitializer(const CXXNewExpr *E, - QualType ElementType, - llvm::Value *BeginPtr, - llvm::Value *NumElements, - llvm::Value *AllocSizeWithoutCookie) { +void CodeGenFunction::EmitNewArrayInitializer( + const CXXNewExpr *E, QualType ElementType, llvm::Type *ElementTy, + llvm::Value *BeginPtr, llvm::Value *NumElements, + llvm::Value *AllocSizeWithoutCookie) { // If we have a type with trivial initialization and no initializer, // there's nothing to do. 
if (!E->hasInitializer()) @@ -809,7 +813,8 @@ CodeGenFunction::EmitNewArrayInitializer(const CXXNewExpr *E, if (const ConstantArrayType *CAT = dyn_cast_or_null<ConstantArrayType>( AllocType->getAsArrayTypeUnsafe())) { unsigned AS = CurPtr->getType()->getPointerAddressSpace(); - llvm::Type *AllocPtrTy = ConvertTypeForMem(AllocType)->getPointerTo(AS); + ElementTy = ConvertTypeForMem(AllocType); + llvm::Type *AllocPtrTy = ElementTy->getPointerTo(AS); CurPtr = Builder.CreateBitCast(CurPtr, AllocPtrTy); InitListElements *= getContext().getConstantArrayElementCount(CAT); } @@ -839,7 +844,8 @@ CodeGenFunction::EmitNewArrayInitializer(const CXXNewExpr *E, // initialization loops. StoreAnyExprIntoOneUnit(*this, ILE->getInit(i), ILE->getInit(i)->getType(), CurPtr); - CurPtr = Builder.CreateConstInBoundsGEP1_32(CurPtr, 1, "array.exp.next"); + CurPtr = Builder.CreateConstInBoundsGEP1_32(ElementTy, CurPtr, 1, + "array.exp.next"); } // The remaining elements are filled with the array filler expression. @@ -1000,7 +1006,7 @@ CodeGenFunction::EmitNewArrayInitializer(const CXXNewExpr *E, // Advance to the next element by adjusting the pointer type as necessary. llvm::Value *NextPtr = - Builder.CreateConstInBoundsGEP1_32(CurPtr, 1, "array.next"); + Builder.CreateConstInBoundsGEP1_32(ElementTy, CurPtr, 1, "array.next"); // Check whether we've gotten to the end of the array and, if so, // exit the loop. @@ -1012,13 +1018,12 @@ CodeGenFunction::EmitNewArrayInitializer(const CXXNewExpr *E, } static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E, - QualType ElementType, - llvm::Value *NewPtr, - llvm::Value *NumElements, + QualType ElementType, llvm::Type *ElementTy, + llvm::Value *NewPtr, llvm::Value *NumElements, llvm::Value *AllocSizeWithoutCookie) { - ApplyDebugLocation DL(CGF, E->getStartLoc()); + ApplyDebugLocation DL(CGF, E); if (E->isArray()) - CGF.EmitNewArrayInitializer(E, ElementType, NewPtr, NumElements, + CGF.EmitNewArrayInitializer(E, ElementType, ElementTy, NewPtr, NumElements, AllocSizeWithoutCookie); else if (const Expr *Init = E->getInitializer()) StoreAnyExprIntoOneUnit(CGF, Init, E->getAllocatedType(), NewPtr); @@ -1279,10 +1284,9 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) { // Emit a null check on the allocation result if the allocation // function is allowed to return null (because it has a non-throwing - // exception spec; for this part, we inline - // CXXNewExpr::shouldNullCheckAllocation()) and we have an + // exception spec or is the reserved placement new) and we have an // interesting initializer. - bool nullCheck = allocatorType->isNothrow(getContext()) && + bool nullCheck = E->shouldNullCheckAllocation(getContext()) && (!allocType.isPODType(getContext()) || E->hasInitializer()); llvm::BasicBlock *nullCheckBB = nullptr; @@ -1327,11 +1331,11 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) { E, allocType); } - llvm::Type *elementPtrTy - = ConvertTypeForMem(allocType)->getPointerTo(AS); + llvm::Type *elementTy = ConvertTypeForMem(allocType); + llvm::Type *elementPtrTy = elementTy->getPointerTo(AS); llvm::Value *result = Builder.CreateBitCast(allocation, elementPtrTy); - EmitNewInitializer(*this, E, allocType, result, numElements, + EmitNewInitializer(*this, E, allocType, elementTy, result, numElements, allocSizeWithoutCookie); if (E->isArray()) { // NewPtr is a pointer to the base element type. 
If we're diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExprComplex.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExprComplex.cpp index 1580bbe..27d1c68 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CGExprComplex.cpp +++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExprComplex.cpp @@ -95,6 +95,7 @@ public: //===--------------------------------------------------------------------===// ComplexPairTy Visit(Expr *E) { + ApplyDebugLocation DL(CGF, E); return StmtVisitor<ComplexExprEmitter, ComplexPairTy>::Visit(E); } @@ -316,14 +317,14 @@ ComplexPairTy ComplexExprEmitter::EmitLoadOfLValue(LValue lvalue, llvm::Value *Real=nullptr, *Imag=nullptr; if (!IgnoreReal || isVolatile) { - llvm::Value *RealP = Builder.CreateStructGEP(SrcPtr, 0, + llvm::Value *RealP = Builder.CreateStructGEP(nullptr, SrcPtr, 0, SrcPtr->getName() + ".realp"); Real = Builder.CreateAlignedLoad(RealP, AlignR, isVolatile, SrcPtr->getName() + ".real"); } if (!IgnoreImag || isVolatile) { - llvm::Value *ImagP = Builder.CreateStructGEP(SrcPtr, 1, + llvm::Value *ImagP = Builder.CreateStructGEP(nullptr, SrcPtr, 1, SrcPtr->getName() + ".imagp"); Imag = Builder.CreateAlignedLoad(ImagP, AlignI, isVolatile, SrcPtr->getName() + ".imag"); @@ -335,12 +336,13 @@ ComplexPairTy ComplexExprEmitter::EmitLoadOfLValue(LValue lvalue, /// specified value pointer. void ComplexExprEmitter::EmitStoreOfComplex(ComplexPairTy Val, LValue lvalue, bool isInit) { - if (lvalue.getType()->isAtomicType()) + if (lvalue.getType()->isAtomicType() || + (!isInit && CGF.LValueIsSuitableForInlineAtomic(lvalue))) return CGF.EmitAtomicStore(RValue::getComplex(Val), lvalue, isInit); llvm::Value *Ptr = lvalue.getAddress(); - llvm::Value *RealPtr = Builder.CreateStructGEP(Ptr, 0, "real"); - llvm::Value *ImagPtr = Builder.CreateStructGEP(Ptr, 1, "imag"); + llvm::Value *RealPtr = Builder.CreateStructGEP(nullptr, Ptr, 0, "real"); + llvm::Value *ImagPtr = Builder.CreateStructGEP(nullptr, Ptr, 1, "imag"); unsigned AlignR = lvalue.getAlignment().getQuantity(); ASTContext &C = CGF.getContext(); QualType ComplexTy = lvalue.getType(); @@ -375,7 +377,7 @@ VisitImaginaryLiteral(const ImaginaryLiteral *IL) { ComplexPairTy ComplexExprEmitter::VisitCallExpr(const CallExpr *E) { - if (E->getCallReturnType()->isReferenceType()) + if (E->getCallReturnType(CGF.getContext())->isReferenceType()) return EmitLoadOfLValue(E); return CGF.EmitCallExpr(E).getComplexVal(); @@ -818,6 +820,8 @@ EmitCompoundAssignLValue(const CompoundAssignOperator *E, TestAndClearIgnoreReal(); TestAndClearIgnoreImag(); QualType LHSTy = E->getLHS()->getType(); + if (const AtomicType *AT = LHSTy->getAs<AtomicType>()) + LHSTy = AT->getValueType(); BinOpInfo OpInfo; @@ -945,13 +949,14 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) { // Bind the common expression if necessary. 
CodeGenFunction::OpaqueValueMapping binding(CGF, E); - RegionCounter Cnt = CGF.getPGORegionCounter(E); + CodeGenFunction::ConditionalEvaluation eval(CGF); - CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock, Cnt.getCount()); + CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock, + CGF.getProfileCount(E)); eval.begin(CGF); CGF.EmitBlock(LHSBlock); - Cnt.beginRegion(Builder); + CGF.incrementProfileCounter(E); ComplexPairTy LHS = Visit(E->getTrueExpr()); LHSBlock = Builder.GetInsertBlock(); CGF.EmitBranch(ContBlock); @@ -1033,7 +1038,7 @@ ComplexPairTy CodeGenFunction::EmitComplexExpr(const Expr *E, bool IgnoreReal, "Invalid complex expression to emit"); return ComplexExprEmitter(*this, IgnoreReal, IgnoreImag) - .Visit(const_cast<Expr*>(E)); + .Visit(const_cast<Expr *>(E)); } void CodeGenFunction::EmitComplexExprIntoLValue(const Expr *E, LValue dest, @@ -1085,8 +1090,8 @@ EmitComplexCompoundAssignmentLValue(const CompoundAssignOperator *E) { } LValue CodeGenFunction:: -EmitScalarCompooundAssignWithComplex(const CompoundAssignOperator *E, - llvm::Value *&Result) { +EmitScalarCompoundAssignWithComplex(const CompoundAssignOperator *E, + llvm::Value *&Result) { CompoundFunc Op = getComplexOp(E->getOpcode()); RValue Val; LValue Ret = ComplexExprEmitter(*this).EmitCompoundAssignLValue(E, Op, Val); diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExprConstant.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExprConstant.cpp index 54f7eee..73ca0cc 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CGExprConstant.cpp +++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExprConstant.cpp @@ -383,14 +383,19 @@ bool ConstStructBuilder::Build(InitListExpr *ILE) { if (!EltInit) return false; - + if (!Field->isBitField()) { // Handle non-bitfield members. AppendField(*Field, Layout.getFieldOffset(FieldNo), EltInit); } else { // Otherwise we have a bitfield. - AppendBitField(*Field, Layout.getFieldOffset(FieldNo), - cast<llvm::ConstantInt>(EltInit)); + if (auto *CI = dyn_cast<llvm::ConstantInt>(EltInit)) { + AppendBitField(*Field, Layout.getFieldOffset(FieldNo), CI); + } else { + // We are trying to initialize a bitfield with a non-trivial constant; + // this requires run-time code.
+ return false; + } } } @@ -1110,7 +1115,7 @@ llvm::Constant *CodeGenModule::EmitConstantValue(const APValue &Value, unsigned AS = C->getType()->getPointerAddressSpace(); llvm::Type *CharPtrTy = Int8Ty->getPointerTo(AS); llvm::Constant *Casted = llvm::ConstantExpr::getBitCast(C, CharPtrTy); - Casted = llvm::ConstantExpr::getGetElementPtr(Casted, Offset); + Casted = llvm::ConstantExpr::getGetElementPtr(Int8Ty, Casted, Offset); C = llvm::ConstantExpr::getPointerCast(Casted, C->getType()); } @@ -1403,10 +1408,6 @@ llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) { llvm::Constant *Element = EmitNullConstant(ElementTy); unsigned NumElements = CAT->getSize().getZExtValue(); - - if (Element->isNullValue()) - return llvm::ConstantAggregateZero::get(ATy); - SmallVector<llvm::Constant *, 8> Array(NumElements, Element); return llvm::ConstantArray::get(ATy, Array); } @@ -1416,8 +1417,7 @@ llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) { return ::EmitNullConstant(*this, RD, /*complete object*/ true); } - assert(T->isMemberPointerType() && "Should only see member pointers here!"); - assert(!T->getAs<MemberPointerType>()->getPointeeType()->isFunctionType() && + assert(T->isMemberDataPointerType() && "Should only see pointers to data members here!"); return getCXXABI().EmitNullMemberPointer(T->castAs<MemberPointerType>()); diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExprScalar.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExprScalar.cpp index 3be14c8..08c81c0 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CGExprScalar.cpp +++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExprScalar.cpp @@ -85,7 +85,7 @@ public: return CGF.EmitCheckedLValue(E, TCK); } - void EmitBinOpCheck(ArrayRef<std::pair<Value *, SanitizerKind>> Checks, + void EmitBinOpCheck(ArrayRef<std::pair<Value *, SanitizerMask>> Checks, const BinOpInfo &Info); Value *EmitLoadOfLValue(LValue LV, SourceLocation Loc) { @@ -196,7 +196,7 @@ public: //===--------------------------------------------------------------------===// Value *Visit(Expr *E) { - ApplyDebugLocation DL(CGF, E->getLocStart()); + ApplyDebugLocation DL(CGF, E); return StmtVisitor<ScalarExprEmitter, Value*>::Visit(E); } @@ -320,7 +320,7 @@ public: Value *VisitCastExpr(CastExpr *E); Value *VisitCallExpr(const CallExpr *E) { - if (E->getCallReturnType()->isReferenceType()) + if (E->getCallReturnType(CGF.getContext())->isReferenceType()) return EmitLoadOfLValue(E); Value *V = CGF.EmitCallExpr(E).getScalarVal(); @@ -349,10 +349,9 @@ public: return EmitScalarPrePostIncDec(E, LV, true, true); } - llvm::Value *EmitAddConsiderOverflowBehavior(const UnaryOperator *E, - llvm::Value *InVal, - llvm::Value *NextVal, - bool IsInc); + llvm::Value *EmitIncDecConsiderOverflowBehavior(const UnaryOperator *E, + llvm::Value *InVal, + bool IsInc); llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, bool isInc, bool isPre); @@ -745,23 +744,37 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType, QualType OrigSrcType = SrcType; llvm::Type *SrcTy = Src->getType(); - // If casting to/from storage-only half FP, use special intrinsics. - if (SrcType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType && - !CGF.getContext().getLangOpts().HalfArgsAndReturns) { - Src = Builder.CreateCall( - CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16, - CGF.CGM.FloatTy), - Src); - SrcType = CGF.getContext().FloatTy; - SrcTy = CGF.FloatTy; - } - // Handle conversions to bool first, they are special: comparisons against 0. 
if (DstType->isBooleanType()) return EmitConversionToBool(Src, SrcType); llvm::Type *DstTy = ConvertType(DstType); + // Cast from half through float if half isn't a native type. + if (SrcType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) { + // Cast to FP using the intrinsic if the half type itself isn't supported. + if (DstTy->isFloatingPointTy()) { + if (!CGF.getContext().getLangOpts().HalfArgsAndReturns) + return Builder.CreateCall( + CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16, DstTy), + Src); + } else { + // Cast to other types through float, using either the intrinsic or FPExt, + // depending on whether the half type itself is supported + // (as opposed to operations on half, available with NativeHalfType). + if (!CGF.getContext().getLangOpts().HalfArgsAndReturns) { + Src = Builder.CreateCall( + CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16, + CGF.CGM.FloatTy), + Src); + } else { + Src = Builder.CreateFPExt(Src, CGF.CGM.FloatTy, "conv"); + } + SrcType = CGF.getContext().FloatTy; + SrcTy = CGF.FloatTy; + } + } + // Ignore conversions like int -> uint. if (SrcTy == DstTy) return Src; @@ -818,10 +831,20 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType, EmitFloatConversionCheck(OrigSrc, OrigSrcType, Src, SrcType, DstType, DstTy); - // Cast to half via float - if (DstType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType && - !CGF.getContext().getLangOpts().HalfArgsAndReturns) + // Cast to half through float if half isn't a native type. + if (DstType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) { + // Make sure we cast in a single step if from another FP type. + if (SrcTy->isFloatingPointTy()) { + // Use the intrinsic if the half type itself isn't supported + // (as opposed to operations on half, available with NativeHalfType). + if (!CGF.getContext().getLangOpts().HalfArgsAndReturns) + return Builder.CreateCall( + CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, SrcTy), Src); + // If the half type is supported, just use an fptrunc. + return Builder.CreateFPTrunc(Src, DstTy); + } DstTy = CGF.FloatTy; + } if (isa<llvm::IntegerType>(SrcTy)) { bool InputSigned = SrcType->isSignedIntegerOrEnumerationType(); @@ -847,10 +870,14 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType, } if (DstTy != ResTy) { - assert(ResTy->isIntegerTy(16) && "Only half FP requires extra conversion"); - Res = Builder.CreateCall( + if (!CGF.getContext().getLangOpts().HalfArgsAndReturns) { + assert(ResTy->isIntegerTy(16) && "Only half FP requires extra conversion"); + Res = Builder.CreateCall( CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, CGF.CGM.FloatTy), Res); + } else { + Res = Builder.CreateFPTrunc(Res, ResTy, "conv"); + } } return Res; @@ -889,7 +916,7 @@ Value *ScalarExprEmitter::EmitNullValue(QualType Ty) { /// operation). The check passes if all values in \p Checks (which are \c i1), /// are \c true. 
void ScalarExprEmitter::EmitBinOpCheck( - ArrayRef<std::pair<Value *, SanitizerKind>> Checks, const BinOpInfo &Info) { + ArrayRef<std::pair<Value *, SanitizerMask>> Checks, const BinOpInfo &Info) { assert(CGF.IsSanitizerScope); StringRef CheckName; SmallVector<llvm::Constant *, 4> StaticData; @@ -1355,6 +1382,13 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { llvm_unreachable("wrong cast for pointers in different address spaces" "(must be an address space cast)!"); } + + if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast)) { + if (auto PT = DestTy->getAs<PointerType>()) + CGF.EmitVTablePtrCheckForCast(PT->getPointeeType(), Src, + /*MayBeNull=*/true); + } + return Builder.CreateBitCast(Src, DstTy); } case CK_AddressSpaceConversion: { @@ -1384,6 +1418,10 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { CGF.EmitTypeCheck(CodeGenFunction::TCK_DowncastPointer, CE->getExprLoc(), Derived, DestTy->getPointeeType()); + if (CGF.SanOpts.has(SanitizerKind::CFIDerivedCast)) + CGF.EmitVTablePtrCheckForCast(DestTy->getPointeeType(), Derived, + /*MayBeNull=*/true); + return Derived; } case CK_UncheckedDerivedToBase: @@ -1412,13 +1450,13 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) { // anything here. if (!E->getType()->isVariableArrayType()) { assert(isa<llvm::PointerType>(V->getType()) && "Expected pointer"); + llvm::Type *NewTy = ConvertType(E->getType()); V = CGF.Builder.CreatePointerCast( - V, ConvertType(E->getType())->getPointerTo( - V->getType()->getPointerAddressSpace())); + V, NewTy->getPointerTo(V->getType()->getPointerAddressSpace())); assert(isa<llvm::ArrayType>(V->getType()->getPointerElementType()) && "Expected pointer to array"); - V = Builder.CreateStructGEP(V, 0, "arraydecay"); + V = Builder.CreateStructGEP(NewTy, V, 0, "arraydecay"); } // Make sure the array decay ends up being the right type. This matters if @@ -1571,26 +1609,32 @@ Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) { // Unary Operators //===----------------------------------------------------------------------===// -llvm::Value *ScalarExprEmitter:: -EmitAddConsiderOverflowBehavior(const UnaryOperator *E, - llvm::Value *InVal, - llvm::Value *NextVal, bool IsInc) { +static BinOpInfo createBinOpInfoFromIncDec(const UnaryOperator *E, + llvm::Value *InVal, bool IsInc) { + BinOpInfo BinOp; + BinOp.LHS = InVal; + BinOp.RHS = llvm::ConstantInt::get(InVal->getType(), 1, false); + BinOp.Ty = E->getType(); + BinOp.Opcode = IsInc ? BO_Add : BO_Sub; + BinOp.FPContractable = false; + BinOp.E = E; + return BinOp; +} + +llvm::Value *ScalarExprEmitter::EmitIncDecConsiderOverflowBehavior( + const UnaryOperator *E, llvm::Value *InVal, bool IsInc) { + llvm::Value *Amount = + llvm::ConstantInt::get(InVal->getType(), IsInc ? 1 : -1, true); + StringRef Name = IsInc ? "inc" : "dec"; switch (CGF.getLangOpts().getSignedOverflowBehavior()) { case LangOptions::SOB_Defined: - return Builder.CreateAdd(InVal, NextVal, IsInc ? "inc" : "dec"); + return Builder.CreateAdd(InVal, Amount, Name); case LangOptions::SOB_Undefined: if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) - return Builder.CreateNSWAdd(InVal, NextVal, IsInc ? "inc" : "dec"); + return Builder.CreateNSWAdd(InVal, Amount, Name); // Fall through. 
case LangOptions::SOB_Trapping: - BinOpInfo BinOp; - BinOp.LHS = InVal; - BinOp.RHS = NextVal; - BinOp.Ty = E->getType(); - BinOp.Opcode = BO_Add; - BinOp.FPContractable = false; - BinOp.E = E; - return EmitOverflowCheckedBinOp(BinOp); + return EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(E, InVal, IsInc)); } llvm_unreachable("Unknown SignedOverflowBehaviorTy"); } @@ -1668,27 +1712,20 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, // Most common case by far: integer increment. } else if (type->isIntegerType()) { - - llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount, true); - // Note that signed integer inc/dec with width less than int can't // overflow because of promotion rules; we're just eliding a few steps here. bool CanOverflow = value->getType()->getIntegerBitWidth() >= CGF.IntTy->getIntegerBitWidth(); if (CanOverflow && type->isSignedIntegerOrEnumerationType()) { - value = EmitAddConsiderOverflowBehavior(E, value, amt, isInc); + value = EmitIncDecConsiderOverflowBehavior(E, value, isInc); } else if (CanOverflow && type->isUnsignedIntegerType() && CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) { - BinOpInfo BinOp; - BinOp.LHS = value; - BinOp.RHS = llvm::ConstantInt::get(value->getType(), 1, false); - BinOp.Ty = E->getType(); - BinOp.Opcode = isInc ? BO_Add : BO_Sub; - BinOp.FPContractable = false; - BinOp.E = E; - value = EmitOverflowCheckedBinOp(BinOp); - } else + value = + EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(E, value, isInc)); + } else { + llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount, true); value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec"); + } // Next most common: pointer increment. } else if (const PointerType *ptr = type->getAs<PointerType>()) { @@ -1742,13 +1779,16 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, // Add the inc/dec to the real part. llvm::Value *amt; - if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType && - !CGF.getContext().getLangOpts().HalfArgsAndReturns) { + if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) { // Another special case: half FP increment should be done via float - value = Builder.CreateCall( - CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16, - CGF.CGM.FloatTy), - input); + if (!CGF.getContext().getLangOpts().HalfArgsAndReturns) { + value = Builder.CreateCall( + CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16, + CGF.CGM.FloatTy), + input, "incdec.conv"); + } else { + value = Builder.CreateFPExt(input, CGF.CGM.FloatTy, "incdec.conv"); + } } if (value->getType()->isFloatTy()) @@ -1758,20 +1798,29 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, amt = llvm::ConstantFP::get(VMContext, llvm::APFloat(static_cast<double>(amount))); else { + // Remaining types are either Half or LongDouble. Convert from float. llvm::APFloat F(static_cast<float>(amount)); bool ignored; - F.convert(CGF.getTarget().getLongDoubleFormat(), + // Don't use getFloatTypeSemantics because Half isn't + // necessarily represented using the "half" LLVM type. + F.convert(value->getType()->isHalfTy() + ? CGF.getTarget().getHalfFormat() + : CGF.getTarget().getLongDoubleFormat(), llvm::APFloat::rmTowardZero, &ignored); amt = llvm::ConstantFP::get(VMContext, F); } value = Builder.CreateFAdd(value, amt, isInc ? 
"inc" : "dec"); - if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType && - !CGF.getContext().getLangOpts().HalfArgsAndReturns) - value = Builder.CreateCall( - CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, - CGF.CGM.FloatTy), - value); + if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) { + if (!CGF.getContext().getLangOpts().HalfArgsAndReturns) { + value = Builder.CreateCall( + CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, + CGF.CGM.FloatTy), + value, "incdec.conv"); + } else { + value = Builder.CreateFPTrunc(value, input->getType(), "incdec.conv"); + } + } // Objective-C pointer types. } else { @@ -1794,10 +1843,9 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, llvm::BasicBlock *opBB = Builder.GetInsertBlock(); llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn); auto Pair = CGF.EmitAtomicCompareExchange( - LV, RValue::get(atomicPHI), RValue::get(CGF.EmitToMemory(value, type)), - E->getExprLoc()); - llvm::Value *old = Pair.first.getScalarVal(); - llvm::Value *success = Pair.second.getScalarVal(); + LV, RValue::get(atomicPHI), RValue::get(value), E->getExprLoc()); + llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), type); + llvm::Value *success = Pair.second; atomicPHI->addIncoming(old, opBB); Builder.CreateCondBr(success, contBB, opBB); Builder.SetInsertPoint(contBB); @@ -2056,7 +2104,7 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue( BinOpInfo OpInfo; if (E->getComputationResultType()->isAnyComplexType()) - return CGF.EmitScalarCompooundAssignWithComplex(E, Result); + return CGF.EmitScalarCompoundAssignWithComplex(E, Result); // Emit the RHS first. __block variables need to have the rhs evaluated // first, plus this should improve codegen a little. 
@@ -2138,10 +2186,9 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue( llvm::BasicBlock *opBB = Builder.GetInsertBlock(); llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn); auto Pair = CGF.EmitAtomicCompareExchange( - LHSLV, RValue::get(atomicPHI), - RValue::get(CGF.EmitToMemory(Result, LHSTy)), E->getExprLoc()); - llvm::Value *old = Pair.first.getScalarVal(); - llvm::Value *success = Pair.second.getScalarVal(); + LHSLV, RValue::get(atomicPHI), RValue::get(Result), E->getExprLoc()); + llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), LHSTy); + llvm::Value *success = Pair.second; atomicPHI->addIncoming(old, opBB); Builder.CreateCondBr(success, contBB, opBB); Builder.SetInsertPoint(contBB); @@ -2184,7 +2231,7 @@ Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E, void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck( const BinOpInfo &Ops, llvm::Value *Zero, bool isDiv) { - SmallVector<std::pair<llvm::Value *, SanitizerKind>, 2> Checks; + SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks; if (CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero)) { Checks.push_back(std::make_pair(Builder.CreateICmpNE(Ops.RHS, Zero), @@ -2296,7 +2343,7 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) { llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, opTy); - Value *resultAndOverflow = Builder.CreateCall2(intrinsic, Ops.LHS, Ops.RHS); + Value *resultAndOverflow = Builder.CreateCall(intrinsic, {Ops.LHS, Ops.RHS}); Value *result = Builder.CreateExtractValue(resultAndOverflow, 0); Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1); @@ -2309,7 +2356,7 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) { if (!isSigned || CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) { CodeGenFunction::SanitizerScope SanScope(&CGF); llvm::Value *NotOverflow = Builder.CreateNot(overflow); - SanitizerKind Kind = isSigned ? SanitizerKind::SignedIntegerOverflow + SanitizerMask Kind = isSigned ? SanitizerKind::SignedIntegerOverflow : SanitizerKind::UnsignedIntegerOverflow; EmitBinOpCheck(std::make_pair(NotOverflow, Kind), Ops); } else @@ -2476,10 +2523,9 @@ static Value* buildFMulAdd(llvm::BinaryOperator *MulOp, Value *Addend, "neg"); } - Value *FMulAdd = - Builder.CreateCall3( + Value *FMulAdd = Builder.CreateCall( CGF.CGM.getIntrinsic(llvm::Intrinsic::fmuladd, Addend->getType()), - MulOp0, MulOp1, Addend); + {MulOp0, MulOp1, Addend}); MulOp->eraseFromParent(); return FMulAdd; @@ -2664,21 +2710,34 @@ Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) { if (Ops.LHS->getType() != RHS->getType()) RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom"); - if (CGF.SanOpts.has(SanitizerKind::Shift) && !CGF.getLangOpts().OpenCL && - isa<llvm::IntegerType>(Ops.LHS->getType())) { + bool SanitizeBase = CGF.SanOpts.has(SanitizerKind::ShiftBase) && + Ops.Ty->hasSignedIntegerRepresentation(); + bool SanitizeExponent = CGF.SanOpts.has(SanitizerKind::ShiftExponent); + // OpenCL 6.3j: shift values are effectively % word size of LHS. 
+ if (CGF.getLangOpts().OpenCL) + RHS = + Builder.CreateAnd(RHS, GetWidthMinusOneValue(Ops.LHS, RHS), "shl.mask"); + else if ((SanitizeBase || SanitizeExponent) && + isa<llvm::IntegerType>(Ops.LHS->getType())) { CodeGenFunction::SanitizerScope SanScope(&CGF); + SmallVector<std::pair<Value *, SanitizerMask>, 2> Checks; llvm::Value *WidthMinusOne = GetWidthMinusOneValue(Ops.LHS, RHS); - llvm::Value *Valid = Builder.CreateICmpULE(RHS, WidthMinusOne); + llvm::Value *ValidExponent = Builder.CreateICmpULE(RHS, WidthMinusOne); - if (Ops.Ty->hasSignedIntegerRepresentation()) { - llvm::BasicBlock *Orig = Builder.GetInsertBlock(); - llvm::BasicBlock *Cont = CGF.createBasicBlock("cont"); - llvm::BasicBlock *CheckBitsShifted = CGF.createBasicBlock("check"); - Builder.CreateCondBr(Valid, CheckBitsShifted, Cont); + if (SanitizeExponent) { + Checks.push_back( + std::make_pair(ValidExponent, SanitizerKind::ShiftExponent)); + } + if (SanitizeBase) { // Check whether we are shifting any non-zero bits off the top of the - // integer. - CGF.EmitBlock(CheckBitsShifted); + // integer. We only emit this check if the exponent is valid; otherwise + // the instructions below will have undefined behavior themselves. + llvm::BasicBlock *Orig = Builder.GetInsertBlock(); + llvm::BasicBlock *Cont = CGF.createBasicBlock("cont"); + llvm::BasicBlock *CheckShiftBase = CGF.createBasicBlock("check"); + Builder.CreateCondBr(ValidExponent, CheckShiftBase, Cont); + CGF.EmitBlock(CheckShiftBase); llvm::Value *BitsShiftedOff = Builder.CreateLShr(Ops.LHS, Builder.CreateSub(WidthMinusOne, RHS, "shl.zeros", @@ -2693,19 +2752,17 @@ Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) { BitsShiftedOff = Builder.CreateLShr(BitsShiftedOff, One); } llvm::Value *Zero = llvm::ConstantInt::get(BitsShiftedOff->getType(), 0); - llvm::Value *SecondCheck = Builder.CreateICmpEQ(BitsShiftedOff, Zero); + llvm::Value *ValidBase = Builder.CreateICmpEQ(BitsShiftedOff, Zero); CGF.EmitBlock(Cont); - llvm::PHINode *P = Builder.CreatePHI(Valid->getType(), 2); - P->addIncoming(Valid, Orig); - P->addIncoming(SecondCheck, CheckBitsShifted); - Valid = P; + llvm::PHINode *BaseCheck = Builder.CreatePHI(ValidBase->getType(), 2); + BaseCheck->addIncoming(Builder.getTrue(), Orig); + BaseCheck->addIncoming(ValidBase, CheckShiftBase); + Checks.push_back(std::make_pair(BaseCheck, SanitizerKind::ShiftBase)); } - EmitBinOpCheck(std::make_pair(Valid, SanitizerKind::Shift), Ops); + assert(!Checks.empty()); + EmitBinOpCheck(Checks, Ops); } - // OpenCL 6.3j: shift values are effectively % word size of LHS. - if (CGF.getLangOpts().OpenCL) - RHS = Builder.CreateAnd(RHS, GetWidthMinusOneValue(Ops.LHS, RHS), "shl.mask"); return Builder.CreateShl(Ops.LHS, RHS, "shl"); } @@ -2717,18 +2774,18 @@ Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) { if (Ops.LHS->getType() != RHS->getType()) RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom"); - if (CGF.SanOpts.has(SanitizerKind::Shift) && !CGF.getLangOpts().OpenCL && - isa<llvm::IntegerType>(Ops.LHS->getType())) { + // OpenCL 6.3j: shift values are effectively % word size of LHS.
+ if (CGF.getLangOpts().OpenCL) + RHS = + Builder.CreateAnd(RHS, GetWidthMinusOneValue(Ops.LHS, RHS), "shr.mask"); + else if (CGF.SanOpts.has(SanitizerKind::ShiftExponent) && + isa<llvm::IntegerType>(Ops.LHS->getType())) { CodeGenFunction::SanitizerScope SanScope(&CGF); llvm::Value *Valid = Builder.CreateICmpULE(RHS, GetWidthMinusOneValue(Ops.LHS, RHS)); - EmitBinOpCheck(std::make_pair(Valid, SanitizerKind::Shift), Ops); + EmitBinOpCheck(std::make_pair(Valid, SanitizerKind::ShiftExponent), Ops); } - // OpenCL 6.3j: shift values are effectively % word size of LHS. - if (CGF.getLangOpts().OpenCL) - RHS = Builder.CreateAnd(RHS, GetWidthMinusOneValue(Ops.LHS, RHS), "shr.mask"); - if (Ops.Ty->hasUnsignedIntegerRepresentation()) return Builder.CreateLShr(Ops.LHS, RHS, "shr"); return Builder.CreateAShr(Ops.LHS, RHS, "shr"); @@ -2846,7 +2903,7 @@ Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,unsigned UICmpOpc, Value *CR6Param = Builder.getInt32(CR6); llvm::Function *F = CGF.CGM.getIntrinsic(ID); - Result = Builder.CreateCall3(F, CR6Param, FirstVecArg, SecondVecArg, ""); + Result = Builder.CreateCall(F, {CR6Param, FirstVecArg, SecondVecArg}); return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType()); } @@ -2975,11 +3032,9 @@ Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) { } Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) { - RegionCounter Cnt = CGF.getPGORegionCounter(E); - // Perform vector logical and on comparisons with zero vectors. if (E->getType()->isVectorType()) { - Cnt.beginRegion(Builder); + CGF.incrementProfileCounter(E); Value *LHS = Visit(E->getLHS()); Value *RHS = Visit(E->getRHS()); @@ -3002,7 +3057,7 @@ Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) { bool LHSCondVal; if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) { if (LHSCondVal) { // If we have 1 && X, just emit X. - Cnt.beginRegion(Builder); + CGF.incrementProfileCounter(E); Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS()); // ZExt result to int or bool. @@ -3020,7 +3075,8 @@ Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) { CodeGenFunction::ConditionalEvaluation eval(CGF); // Branch on the LHS first. If it is false, go to the failure (cont) block. - CGF.EmitBranchOnBoolExpr(E->getLHS(), RHSBlock, ContBlock, Cnt.getCount()); + CGF.EmitBranchOnBoolExpr(E->getLHS(), RHSBlock, ContBlock, + CGF.getProfileCount(E->getRHS())); // Any edges into the ContBlock are now from an (indeterminate number of) // edges from this first condition. All of these values will be false. Start @@ -3033,7 +3089,7 @@ Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) { eval.begin(CGF); CGF.EmitBlock(RHSBlock); - Cnt.beginRegion(Builder); + CGF.incrementProfileCounter(E); Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS()); eval.end(CGF); @@ -3043,7 +3099,7 @@ Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) { // Emit an unconditional branch from this block to ContBlock. { // There is no need to emit line number for unconditional branch. - ApplyDebugLocation DL(CGF); + auto NL = ApplyDebugLocation::CreateEmpty(CGF); CGF.EmitBlock(ContBlock); } // Insert an entry into the phi node for the edge with the value of RHSCond. @@ -3054,11 +3110,9 @@ Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) { } Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) { - RegionCounter Cnt = CGF.getPGORegionCounter(E); - // Perform vector logical or on comparisons with zero vectors. 
if (E->getType()->isVectorType()) { - Cnt.beginRegion(Builder); + CGF.incrementProfileCounter(E); Value *LHS = Visit(E->getLHS()); Value *RHS = Visit(E->getRHS()); @@ -3081,7 +3135,7 @@ Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) { bool LHSCondVal; if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) { if (!LHSCondVal) { // If we have 0 || X, just emit X. - Cnt.beginRegion(Builder); + CGF.incrementProfileCounter(E); Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS()); // ZExt result to int or bool. @@ -3100,7 +3154,8 @@ Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) { // Branch on the LHS first. If it is true, go to the success (cont) block. CGF.EmitBranchOnBoolExpr(E->getLHS(), ContBlock, RHSBlock, - Cnt.getParentCount() - Cnt.getCount()); + CGF.getCurrentProfileCount() - + CGF.getProfileCount(E->getRHS())); // Any edges into the ContBlock are now from an (indeterminate number of) // edges from this first condition. All of these values will be true. Start @@ -3115,7 +3170,7 @@ Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) { // Emit the RHS condition as a bool value. CGF.EmitBlock(RHSBlock); - Cnt.beginRegion(Builder); + CGF.incrementProfileCounter(E); Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS()); eval.end(CGF); @@ -3166,7 +3221,6 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) { // Bind the common expression if necessary. CodeGenFunction::OpaqueValueMapping binding(CGF, E); - RegionCounter Cnt = CGF.getPGORegionCounter(E); Expr *condExpr = E->getCond(); Expr *lhsExpr = E->getTrueExpr(); @@ -3182,7 +3236,7 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) { // If the dead side doesn't have labels we need, just emit the Live part. if (!CGF.ContainsLabel(dead)) { if (CondExprBool) - Cnt.beginRegion(Builder); + CGF.incrementProfileCounter(E); Value *Result = Visit(live); // If the live part is a throw expression, it acts like it has a void @@ -3199,7 +3253,7 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) { // the select function. if (CGF.getLangOpts().OpenCL && condExpr->getType()->isVectorType()) { - Cnt.beginRegion(Builder); + CGF.incrementProfileCounter(E); llvm::Value *CondV = CGF.EmitScalarExpr(condExpr); llvm::Value *LHS = Visit(lhsExpr); @@ -3244,7 +3298,7 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) { // safe to evaluate the LHS and RHS unconditionally. 
if (isCheapEnoughToEvaluateUnconditionally(lhsExpr, CGF) && isCheapEnoughToEvaluateUnconditionally(rhsExpr, CGF)) { - Cnt.beginRegion(Builder); + CGF.incrementProfileCounter(E); llvm::Value *CondV = CGF.EvaluateExprAsBool(condExpr); llvm::Value *LHS = Visit(lhsExpr); @@ -3262,10 +3316,11 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) { llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end"); CodeGenFunction::ConditionalEvaluation eval(CGF); - CGF.EmitBranchOnBoolExpr(condExpr, LHSBlock, RHSBlock, Cnt.getCount()); + CGF.EmitBranchOnBoolExpr(condExpr, LHSBlock, RHSBlock, + CGF.getProfileCount(lhsExpr)); CGF.EmitBlock(LHSBlock); - Cnt.beginRegion(Builder); + CGF.incrementProfileCounter(E); eval.begin(CGF); Value *LHS = Visit(lhsExpr); eval.end(CGF); @@ -3393,14 +3448,8 @@ Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) { assert(E && hasScalarEvaluationKind(E->getType()) && "Invalid scalar expression to emit"); - bool hasDebugInfo = getDebugInfo(); - if (isa<CXXDefaultArgExpr>(E)) - disableDebugInfo(); - Value *V = ScalarExprEmitter(*this, IgnoreResultAssign) - .Visit(const_cast<Expr*>(E)); - if (isa<CXXDefaultArgExpr>(E) && hasDebugInfo) - enableDebugInfo(); - return V; + return ScalarExprEmitter(*this, IgnoreResultAssign) + .Visit(const_cast<Expr *>(E)); } /// EmitScalarConversion - Emit a conversion from the specified type to the diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGLoopInfo.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGLoopInfo.cpp index 89f43c2..011ae7e 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CGLoopInfo.cpp +++ b/contrib/llvm/tools/clang/lib/CodeGen/CGLoopInfo.cpp @@ -26,8 +26,8 @@ static MDNode *createMetadata(LLVMContext &Ctx, const LoopAttributes &Attrs) { SmallVector<Metadata *, 4> Args; // Reserve operand 0 for loop id self reference. - MDNode *TempNode = MDNode::getTemporary(Ctx, None); - Args.push_back(TempNode); + auto TempNode = MDNode::getTemporary(Ctx, None); + Args.push_back(TempNode.get()); // Setting vectorizer.width if (Attrs.VectorizerWidth > 0) { @@ -58,7 +58,6 @@ static MDNode *createMetadata(LLVMContext &Ctx, const LoopAttributes &Attrs) { // Set the first operand to itself. MDNode *LoopID = MDNode::get(Ctx, Args); LoopID->replaceOperandWith(0, LoopID); - MDNode::deleteTemporary(TempNode); return LoopID; } diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGLoopInfo.h b/contrib/llvm/tools/clang/lib/CodeGen/CGLoopInfo.h index b169399..aee1621 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CGLoopInfo.h +++ b/contrib/llvm/tools/clang/lib/CodeGen/CGLoopInfo.h @@ -78,8 +78,8 @@ private: /// This stack can be used to prepare attributes which are applied when a loop /// is emitted. class LoopInfoStack { - LoopInfoStack(const LoopInfoStack &) LLVM_DELETED_FUNCTION; - void operator=(const LoopInfoStack &) LLVM_DELETED_FUNCTION; + LoopInfoStack(const LoopInfoStack &) = delete; + void operator=(const LoopInfoStack &) = delete; public: LoopInfoStack() {} diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGObjC.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGObjC.cpp index 34c6d94..ef9a92d 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CGObjC.cpp +++ b/contrib/llvm/tools/clang/lib/CodeGen/CGObjC.cpp @@ -102,8 +102,8 @@ llvm::Value *CodeGenFunction::EmitObjCCollectionLiteral(const Expr *E, ArrayType::Normal, /*IndexTypeQuals=*/0); // Allocate the temporary array(s). 
- llvm::Value *Objects = CreateMemTemp(ElementArrayType, "objects"); - llvm::Value *Keys = nullptr; + llvm::AllocaInst *Objects = CreateMemTemp(ElementArrayType, "objects"); + llvm::AllocaInst *Keys = nullptr; if (DLE) Keys = CreateMemTemp(ElementArrayType, "keys"); @@ -119,10 +119,9 @@ llvm::Value *CodeGenFunction::EmitObjCCollectionLiteral(const Expr *E, if (ALE) { // Emit the element and store it to the appropriate array slot. const Expr *Rhs = ALE->getElement(i); - LValue LV = LValue::MakeAddr(Builder.CreateStructGEP(Objects, i), - ElementType, - Context.getTypeAlignInChars(Rhs->getType()), - Context); + LValue LV = LValue::MakeAddr( + Builder.CreateStructGEP(Objects->getAllocatedType(), Objects, i), + ElementType, Context.getTypeAlignInChars(Rhs->getType()), Context); llvm::Value *value = EmitScalarExpr(Rhs); EmitStoreThroughLValue(RValue::get(value), LV, true); @@ -132,19 +131,17 @@ llvm::Value *CodeGenFunction::EmitObjCCollectionLiteral(const Expr *E, } else { // Emit the key and store it to the appropriate array slot. const Expr *Key = DLE->getKeyValueElement(i).Key; - LValue KeyLV = LValue::MakeAddr(Builder.CreateStructGEP(Keys, i), - ElementType, - Context.getTypeAlignInChars(Key->getType()), - Context); + LValue KeyLV = LValue::MakeAddr( + Builder.CreateStructGEP(Keys->getAllocatedType(), Keys, i), + ElementType, Context.getTypeAlignInChars(Key->getType()), Context); llvm::Value *keyValue = EmitScalarExpr(Key); EmitStoreThroughLValue(RValue::get(keyValue), KeyLV, /*isInit=*/true); // Emit the value and store it to the appropriate array slot. - const Expr *Value = DLE->getKeyValueElement(i).Value; - LValue ValueLV = LValue::MakeAddr(Builder.CreateStructGEP(Objects, i), - ElementType, - Context.getTypeAlignInChars(Value->getType()), - Context); + const Expr *Value = DLE->getKeyValueElement(i).Value; + LValue ValueLV = LValue::MakeAddr( + Builder.CreateStructGEP(Objects->getAllocatedType(), Objects, i), + ElementType, Context.getTypeAlignInChars(Value->getType()), Context); llvm::Value *valueValue = EmitScalarExpr(Value); EmitStoreThroughLValue(RValue::get(valueValue), ValueLV, /*isInit=*/true); if (TrackNeededObjects) { @@ -472,8 +469,7 @@ void CodeGenFunction::StartObjCMethod(const ObjCMethodDecl *OMD, args.push_back(OMD->getSelfDecl()); args.push_back(OMD->getCmdDecl()); - for (const auto *PI : OMD->params()) - args.push_back(PI); + args.append(OMD->param_begin(), OMD->param_end()); CurGD = OMD; CurEHLocation = OMD->getLocEnd(); @@ -501,8 +497,7 @@ void CodeGenFunction::GenerateObjCMethod(const ObjCMethodDecl *OMD) { StartObjCMethod(OMD, OMD->getClassInterface()); PGO.assignRegionCounters(OMD, CurFn); assert(isa<CompoundStmt>(OMD->getBody())); - RegionCounter Cnt = getPGORegionCounter(OMD->getBody()); - Cnt.beginRegion(Builder); + incrementProfileCounter(OMD->getBody()); EmitCompoundStmtWithoutScope(*cast<CompoundStmt>(OMD->getBody())); FinishFunction(OMD->getBodyRBrace()); } @@ -1435,7 +1430,7 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){ // Fast enumeration state. QualType StateTy = CGM.getObjCFastEnumerationStateType(); - llvm::Value *StatePtr = CreateMemTemp(StateTy, "state.ptr"); + llvm::AllocaInst *StatePtr = CreateMemTemp(StateTy, "state.ptr"); EmitNullInitialization(StatePtr, StateTy); // Number of elements in the items array. @@ -1507,11 +1502,11 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){ // If the limit pointer was zero to begin with, the collection is // empty; skip all this. 
Set the branch weight assuming this has the same // probability of exiting the loop as any other loop exit. - uint64_t EntryCount = PGO.getCurrentRegionCount(); - RegionCounter Cnt = getPGORegionCounter(&S); - Builder.CreateCondBr(Builder.CreateICmpEQ(initialBufferLimit, zero, "iszero"), - EmptyBB, LoopInitBB, - PGO.createBranchWeights(EntryCount, Cnt.getCount())); + uint64_t EntryCount = getCurrentProfileCount(); + Builder.CreateCondBr( + Builder.CreateICmpEQ(initialBufferLimit, zero, "iszero"), EmptyBB, + LoopInitBB, + createProfileWeights(EntryCount, getProfileCount(S.getBody()))); // Otherwise, initialize the loop. EmitBlock(LoopInitBB); @@ -1519,8 +1514,8 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){ // Save the initial mutations value. This is the value at an // address that was written into the state object by // countByEnumeratingWithState:objects:count:. - llvm::Value *StateMutationsPtrPtr = - Builder.CreateStructGEP(StatePtr, 2, "mutationsptr.ptr"); + llvm::Value *StateMutationsPtrPtr = Builder.CreateStructGEP( + StatePtr->getAllocatedType(), StatePtr, 2, "mutationsptr.ptr"); llvm::Value *StateMutationsPtr = Builder.CreateLoad(StateMutationsPtrPtr, "mutationsptr"); @@ -1540,7 +1535,7 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){ llvm::PHINode *count = Builder.CreatePHI(UnsignedLongLTy, 3, "forcoll.count"); count->addIncoming(initialBufferLimit, LoopInitBB); - Cnt.beginRegion(Builder); + incrementProfileCounter(&S); // Check whether the mutations value has changed from where it was // at start. StateMutationsPtr should actually be invariant between @@ -1600,8 +1595,8 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){ // Fetch the buffer out of the enumeration state. // TODO: this pointer should actually be invariant between // refreshes, which would help us do certain loop optimizations. - llvm::Value *StateItemsPtr = - Builder.CreateStructGEP(StatePtr, 1, "stateitems.ptr"); + llvm::Value *StateItemsPtr = Builder.CreateStructGEP( + StatePtr->getAllocatedType(), StatePtr, 1, "stateitems.ptr"); llvm::Value *EnumStateItems = Builder.CreateLoad(StateItemsPtr, "stateitems"); @@ -1652,9 +1647,9 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){ // Set the branch weights based on the simplifying assumption that this is // like a while-loop, i.e., ignoring that the false branch fetches more // elements and then returns to the loop. - Builder.CreateCondBr(Builder.CreateICmpULT(indexPlusOne, count), - LoopBodyBB, FetchMoreBB, - PGO.createBranchWeights(Cnt.getCount(), EntryCount)); + Builder.CreateCondBr( + Builder.CreateICmpULT(indexPlusOne, count), LoopBodyBB, FetchMoreBB, + createProfileWeights(getProfileCount(S.getBody()), EntryCount)); index->addIncoming(indexPlusOne, AfterBody.getBlock()); count->addIncoming(count, AfterBody.getBlock()); @@ -1985,7 +1980,8 @@ CodeGenFunction::EmitARCRetainAutoreleasedReturnValue(llvm::Value *value) { } // Call the marker asm if we made one, which we do only at -O0. 
- if (marker) Builder.CreateCall(marker); + if (marker) + Builder.CreateCall(marker, {}); return emitARCValueOperation(*this, value, CGM.getARCEntrypoints().objc_retainAutoreleasedReturnValue, diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCGNU.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCGNU.cpp index c0dc3b8..1580c77 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCGNU.cpp +++ b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCGNU.cpp @@ -46,54 +46,49 @@ namespace { /// avoids constructing the type more than once if it's used more than once. class LazyRuntimeFunction { CodeGenModule *CGM; - std::vector<llvm::Type*> ArgTys; + llvm::FunctionType *FTy; const char *FunctionName; llvm::Constant *Function; - public: - /// Constructor leaves this class uninitialized, because it is intended to - /// be used as a field in another class and not all of the types that are - /// used as arguments will necessarily be available at construction time. - LazyRuntimeFunction() - : CGM(nullptr), FunctionName(nullptr), Function(nullptr) {} - /// Initialises the lazy function with the name, return type, and the types - /// of the arguments. - LLVM_END_WITH_NULL - void init(CodeGenModule *Mod, const char *name, - llvm::Type *RetTy, ...) { - CGM =Mod; - FunctionName = name; - Function = nullptr; - ArgTys.clear(); - va_list Args; - va_start(Args, RetTy); - while (llvm::Type *ArgTy = va_arg(Args, llvm::Type*)) - ArgTys.push_back(ArgTy); - va_end(Args); - // Push the return type on at the end so we can pop it off easily - ArgTys.push_back(RetTy); - } - /// Overloaded cast operator, allows the class to be implicitly cast to an - /// LLVM constant. - operator llvm::Constant*() { - if (!Function) { - if (!FunctionName) return nullptr; - // We put the return type on the end of the vector, so pop it back off - llvm::Type *RetTy = ArgTys.back(); - ArgTys.pop_back(); - llvm::FunctionType *FTy = llvm::FunctionType::get(RetTy, ArgTys, false); - Function = - cast<llvm::Constant>(CGM->CreateRuntimeFunction(FTy, FunctionName)); - // We won't need to use the types again, so we may as well clean up the - // vector now - ArgTys.resize(0); - } - return Function; - } - operator llvm::Function*() { - return cast<llvm::Function>((llvm::Constant*)*this); - } +public: + /// Constructor leaves this class uninitialized, because it is intended to + /// be used as a field in another class and not all of the types that are + /// used as arguments will necessarily be available at construction time. + LazyRuntimeFunction() + : CGM(nullptr), FunctionName(nullptr), Function(nullptr) {} + /// Initialises the lazy function with the name, return type, and the types + /// of the arguments. + LLVM_END_WITH_NULL + void init(CodeGenModule *Mod, const char *name, llvm::Type *RetTy, ...) { + CGM = Mod; + FunctionName = name; + Function = nullptr; + std::vector<llvm::Type *> ArgTys; + va_list Args; + va_start(Args, RetTy); + while (llvm::Type *ArgTy = va_arg(Args, llvm::Type *)) + ArgTys.push_back(ArgTy); + va_end(Args); + FTy = llvm::FunctionType::get(RetTy, ArgTys, false); + } + + llvm::FunctionType *getType() { return FTy; } + + /// Overloaded cast operator, allows the class to be implicitly cast to an + /// LLVM constant. 
+ operator llvm::Constant *() { + if (!Function) { + if (!FunctionName) + return nullptr; + Function = + cast<llvm::Constant>(CGM->CreateRuntimeFunction(FTy, FunctionName)); + } + return Function; + } + operator llvm::Function *() { + return cast<llvm::Function>((llvm::Constant *)*this); + } }; @@ -171,8 +166,9 @@ protected: /// where the C code specifies const char*. llvm::Constant *MakeConstantString(const std::string &Str, const std::string &Name="") { - llvm::Constant *ConstStr = CGM.GetAddrOfConstantCString(Str, Name.c_str()); - return llvm::ConstantExpr::getGetElementPtr(ConstStr, Zeros); + auto *ConstStr = CGM.GetAddrOfConstantCString(Str, Name.c_str()); + return llvm::ConstantExpr::getGetElementPtr(ConstStr->getValueType(), + ConstStr, Zeros); } /// Emits a linkonce_odr string, whose name is the prefix followed by the /// string value. This allows the linker to combine the strings between @@ -181,13 +177,14 @@ protected: llvm::Constant *ExportUniqueString(const std::string &Str, const std::string prefix) { std::string name = prefix + Str; - llvm::Constant *ConstStr = TheModule.getGlobalVariable(name); + auto *ConstStr = TheModule.getGlobalVariable(name); if (!ConstStr) { llvm::Constant *value = llvm::ConstantDataArray::getString(VMContext,Str); ConstStr = new llvm::GlobalVariable(TheModule, value->getType(), true, llvm::GlobalValue::LinkOnceODRLinkage, value, prefix + Str); } - return llvm::ConstantExpr::getGetElementPtr(ConstStr, Zeros); + return llvm::ConstantExpr::getGetElementPtr(ConstStr->getValueType(), + ConstStr, Zeros); } /// Generates a global structure, initialized by the elements in the vector. /// The element types must match the types of the structure elements in the @@ -237,8 +234,9 @@ protected: NameAndAttributes += TypeStr; NameAndAttributes += '\0'; NameAndAttributes += PD->getNameAsString(); - return llvm::ConstantExpr::getGetElementPtr( - CGM.GetAddrOfConstantCString(NameAndAttributes), Zeros); + auto *ConstStr = CGM.GetAddrOfConstantCString(NameAndAttributes); + return llvm::ConstantExpr::getGetElementPtr(ConstStr->getValueType(), + ConstStr, Zeros); } return MakeConstantString(PD->getNameAsString()); } @@ -672,8 +670,8 @@ class CGObjCGNUstep : public CGObjCGNU { slot->setMetadata(msgSendMDKind, node); // Load the imp from the slot - llvm::Value *imp = - Builder.CreateLoad(Builder.CreateStructGEP(slot.getInstruction(), 4)); + llvm::Value *imp = Builder.CreateLoad( + Builder.CreateStructGEP(nullptr, slot.getInstruction(), 4)); // The lookup function may have changed the receiver, so make sure we use // the new one. @@ -690,7 +688,7 @@ class CGObjCGNUstep : public CGObjCGNU { CGF.EmitNounwindRuntimeCall(SlotLookupSuperFn, lookupArgs); slot->setOnlyReadsMemory(); - return Builder.CreateLoad(Builder.CreateStructGEP(slot, 4)); + return Builder.CreateLoad(Builder.CreateStructGEP(nullptr, slot, 4)); } public: CGObjCGNUstep(CodeGenModule &Mod) : CGObjCGNU(Mod, 9, 3) { @@ -1013,7 +1011,7 @@ CGObjCGNU::CGObjCGNU(CodeGenModule &cgm, unsigned runtimeABIVersion, llvm::Value *CGObjCGNU::GetClassNamed(CodeGenFunction &CGF, const std::string &Name, bool isWeak) { - llvm::Value *ClassName = CGM.GetAddrOfConstantCString(Name); + llvm::GlobalVariable *ClassNameGV = CGM.GetAddrOfConstantCString(Name); // With the incompatible ABI, this will need to be replaced with a direct // reference to the class symbol. 
For the compatible nonfragile ABI we are // still performing this lookup at run time but emitting the symbol for the @@ -1023,7 +1021,8 @@ llvm::Value *CGObjCGNU::GetClassNamed(CodeGenFunction &CGF, // with memoized versions or with static references if it's safe to do so. if (!isWeak) EmitClassRef(Name); - ClassName = CGF.Builder.CreateStructGEP(ClassName, 0); + llvm::Value *ClassName = + CGF.Builder.CreateStructGEP(ClassNameGV->getValueType(), ClassNameGV, 0); llvm::Constant *ClassLookupFn = CGM.CreateRuntimeFunction(llvm::FunctionType::get(IdTy, PtrToInt8Ty, true), @@ -1056,7 +1055,7 @@ llvm::Value *CGObjCGNU::GetSelector(CodeGenFunction &CGF, Selector Sel, } if (!SelValue) { SelValue = llvm::GlobalAlias::create( - SelectorTy->getElementType(), 0, llvm::GlobalValue::PrivateLinkage, + SelectorTy, llvm::GlobalValue::PrivateLinkage, ".objc_selector_" + Sel.getAsString(), &TheModule); Types.push_back(TypedSelector(TypeEncoding, SelValue)); } @@ -1143,21 +1142,22 @@ llvm::Constant *CGObjCGNUstep::GetEHType(QualType T) { // It's quite ugly hard-coding this. Ideally we'd generate it using the host // platform's name mangling. const char *vtableName = "_ZTVN7gnustep7libobjc22__objc_class_type_infoE"; - llvm::Constant *Vtable = TheModule.getGlobalVariable(vtableName); + auto *Vtable = TheModule.getGlobalVariable(vtableName); if (!Vtable) { Vtable = new llvm::GlobalVariable(TheModule, PtrToInt8Ty, true, llvm::GlobalValue::ExternalLinkage, nullptr, vtableName); } llvm::Constant *Two = llvm::ConstantInt::get(IntTy, 2); - Vtable = llvm::ConstantExpr::getGetElementPtr(Vtable, Two); - Vtable = llvm::ConstantExpr::getBitCast(Vtable, PtrToInt8Ty); + auto *BVtable = llvm::ConstantExpr::getBitCast( + llvm::ConstantExpr::getGetElementPtr(Vtable->getValueType(), Vtable, Two), + PtrToInt8Ty); llvm::Constant *typeName = ExportUniqueString(className, "__objc_eh_typename_"); std::vector<llvm::Constant*> fields; - fields.push_back(Vtable); + fields.push_back(BVtable); fields.push_back(typeName); llvm::Constant *TI = MakeGlobal(llvm::StructType::get(PtrToInt8Ty, PtrToInt8Ty, @@ -1261,25 +1261,25 @@ CGObjCGNU::GenerateMessageSendSuper(CodeGenFunction &CGF, if (IsClassMessage) { if (!MetaClassPtrAlias) { MetaClassPtrAlias = llvm::GlobalAlias::create( - IdTy->getElementType(), 0, llvm::GlobalValue::InternalLinkage, + IdTy, llvm::GlobalValue::InternalLinkage, ".objc_metaclass_ref" + Class->getNameAsString(), &TheModule); } ReceiverClass = MetaClassPtrAlias; } else { if (!ClassPtrAlias) { ClassPtrAlias = llvm::GlobalAlias::create( - IdTy->getElementType(), 0, llvm::GlobalValue::InternalLinkage, + IdTy, llvm::GlobalValue::InternalLinkage, ".objc_class_ref" + Class->getNameAsString(), &TheModule); } ReceiverClass = ClassPtrAlias; } } // Cast the pointer to a simplified version of the class structure + llvm::Type *CastTy = llvm::StructType::get(IdTy, IdTy, nullptr); ReceiverClass = Builder.CreateBitCast(ReceiverClass, - llvm::PointerType::getUnqual( - llvm::StructType::get(IdTy, IdTy, nullptr))); + llvm::PointerType::getUnqual(CastTy)); // Get the superclass pointer - ReceiverClass = Builder.CreateStructGEP(ReceiverClass, 1); + ReceiverClass = Builder.CreateStructGEP(CastTy, ReceiverClass, 1); // Load the superclass pointer ReceiverClass = Builder.CreateLoad(ReceiverClass); // Construct the structure used to look up the IMP @@ -1287,8 +1287,10 @@ CGObjCGNU::GenerateMessageSendSuper(CodeGenFunction &CGF, Receiver->getType(), IdTy, nullptr); llvm::Value *ObjCSuper = Builder.CreateAlloca(ObjCSuperTy); - 
Builder.CreateStore(Receiver, Builder.CreateStructGEP(ObjCSuper, 0)); - Builder.CreateStore(ReceiverClass, Builder.CreateStructGEP(ObjCSuper, 1)); + Builder.CreateStore(Receiver, + Builder.CreateStructGEP(ObjCSuperTy, ObjCSuper, 0)); + Builder.CreateStore(ReceiverClass, + Builder.CreateStructGEP(ObjCSuperTy, ObjCSuper, 1)); ObjCSuper = EnforceType(Builder, ObjCSuper, PtrToObjCSuperTy); @@ -2294,7 +2296,8 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) { offsetPointerIndexes[2] = llvm::ConstantInt::get(IndexTy, ivarIndex); // Get the correct ivar field llvm::Constant *offsetValue = llvm::ConstantExpr::getGetElementPtr( - IvarList, offsetPointerIndexes); + cast<llvm::GlobalVariable>(IvarList)->getValueType(), IvarList, + offsetPointerIndexes); // Get the existing variable, if one exists. llvm::GlobalVariable *offset = TheModule.getNamedGlobal(Name); if (offset) { @@ -2366,7 +2369,7 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() { std::vector<llvm::Constant*> Elements; llvm::Constant *Statics = NULLPtr; // Generate statics list: - if (ConstantStrings.size()) { + if (!ConstantStrings.empty()) { llvm::ArrayType *StaticsArrayTy = llvm::ArrayType::get(PtrToInt8Ty, ConstantStrings.size() + 1); ConstantStrings.push_back(NULLPtr); @@ -2439,8 +2442,8 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() { // Number of static selectors Elements.push_back(llvm::ConstantInt::get(LongTy, SelectorCount)); - llvm::Constant *SelectorList = MakeGlobalArray(SelStructTy, Selectors, - ".objc_selector_list"); + llvm::GlobalVariable *SelectorList = + MakeGlobalArray(SelStructTy, Selectors, ".objc_selector_list"); Elements.push_back(llvm::ConstantExpr::getBitCast(SelectorList, SelStructPtrTy)); @@ -2450,8 +2453,8 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() { llvm::Constant *Idxs[] = {Zeros[0], llvm::ConstantInt::get(Int32Ty, i), Zeros[0]}; // FIXME: We're generating redundant loads and stores here! - llvm::Constant *SelPtr = llvm::ConstantExpr::getGetElementPtr(SelectorList, - makeArrayRef(Idxs, 2)); + llvm::Constant *SelPtr = llvm::ConstantExpr::getGetElementPtr( + SelectorList->getValueType(), SelectorList, makeArrayRef(Idxs, 2)); // If selectors are defined as an opaque type, cast the pointer to this // type. 
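The common thread in these CGObjCGNU hunks is the explicitly spelled source element type: ConstantExpr::getGetElementPtr (and its instruction-level counterpart CreateStructGEP) now takes the global's value type up front instead of rederiving it from the pointer type. A minimal sketch of the new constant-GEP form, with firstElementPtr as an invented name:

#include "llvm/IR/Constants.h"
#include "llvm/IR/GlobalVariable.h"

// Decay a pointer-to-aggregate global to a pointer to its first element,
// the same Zeros-indexed GEP the string helpers above emit.
static llvm::Constant *firstElementPtr(llvm::GlobalVariable *GV) {
  llvm::Type *I32 = llvm::Type::getInt32Ty(GV->getContext());
  llvm::Constant *Zeros[] = {llvm::ConstantInt::get(I32, 0),
                             llvm::ConstantInt::get(I32, 0)};
  return llvm::ConstantExpr::getGetElementPtr(GV->getValueType(), GV, Zeros);
}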
SelPtr = llvm::ConstantExpr::getBitCast(SelPtr, SelectorTy); @@ -2562,8 +2565,8 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() { true); if (TheClass) { TheClass = llvm::ConstantExpr::getBitCast(TheClass, PtrTy); - Builder.CreateCall2(RegisterAlias, TheClass, - MakeConstantString(iter->second)); + Builder.CreateCall(RegisterAlias, + {TheClass, MakeConstantString(iter->second)}); } } // Jump to end: @@ -2679,7 +2682,7 @@ llvm::Value * CGObjCGNU::EmitObjCWeakRead(CodeGenFunction &CGF, llvm::Value *AddrWeakObj) { CGBuilderTy &B = CGF.Builder; AddrWeakObj = EnforceType(B, AddrWeakObj, PtrToIdTy); - return B.CreateCall(WeakReadFn, AddrWeakObj); + return B.CreateCall(WeakReadFn.getType(), WeakReadFn, AddrWeakObj); } void CGObjCGNU::EmitObjCWeakAssign(CodeGenFunction &CGF, @@ -2687,7 +2690,7 @@ void CGObjCGNU::EmitObjCWeakAssign(CodeGenFunction &CGF, CGBuilderTy &B = CGF.Builder; src = EnforceType(B, src, IdTy); dst = EnforceType(B, dst, PtrToIdTy); - B.CreateCall2(WeakAssignFn, src, dst); + B.CreateCall(WeakAssignFn.getType(), WeakAssignFn, {src, dst}); } void CGObjCGNU::EmitObjCGlobalAssign(CodeGenFunction &CGF, @@ -2696,11 +2699,9 @@ void CGObjCGNU::EmitObjCGlobalAssign(CodeGenFunction &CGF, CGBuilderTy &B = CGF.Builder; src = EnforceType(B, src, IdTy); dst = EnforceType(B, dst, PtrToIdTy); - if (!threadlocal) - B.CreateCall2(GlobalAssignFn, src, dst); - else - // FIXME: Add threadlocal assign API - llvm_unreachable("EmitObjCGlobalAssign - Thread Local API NYI"); + // FIXME: Add threadlocal assign API + assert(!threadlocal && "EmitObjCGlobalAssign - Thread Local API NYI"); + B.CreateCall(GlobalAssignFn.getType(), GlobalAssignFn, {src, dst}); } void CGObjCGNU::EmitObjCIvarAssign(CodeGenFunction &CGF, @@ -2709,7 +2710,7 @@ void CGObjCGNU::EmitObjCIvarAssign(CodeGenFunction &CGF, CGBuilderTy &B = CGF.Builder; src = EnforceType(B, src, IdTy); dst = EnforceType(B, dst, IdTy); - B.CreateCall3(IvarAssignFn, src, dst, ivarOffset); + B.CreateCall(IvarAssignFn.getType(), IvarAssignFn, {src, dst, ivarOffset}); } void CGObjCGNU::EmitObjCStrongCastAssign(CodeGenFunction &CGF, @@ -2717,7 +2718,7 @@ void CGObjCGNU::EmitObjCStrongCastAssign(CodeGenFunction &CGF, CGBuilderTy &B = CGF.Builder; src = EnforceType(B, src, IdTy); dst = EnforceType(B, dst, PtrToIdTy); - B.CreateCall2(StrongCastAssignFn, src, dst); + B.CreateCall(StrongCastAssignFn.getType(), StrongCastAssignFn, {src, dst}); } void CGObjCGNU::EmitGCMemmoveCollectable(CodeGenFunction &CGF, @@ -2728,7 +2729,7 @@ void CGObjCGNU::EmitGCMemmoveCollectable(CodeGenFunction &CGF, DestPtr = EnforceType(B, DestPtr, PtrTy); SrcPtr = EnforceType(B, SrcPtr, PtrTy); - B.CreateCall3(MemMoveFn, DestPtr, SrcPtr, Size); + B.CreateCall(MemMoveFn.getType(), MemMoveFn, {DestPtr, SrcPtr, Size}); } llvm::GlobalVariable *CGObjCGNU::ObjCIvarOffsetVariable( diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCMac.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCMac.cpp index f91e8e1..a45446a 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCMac.cpp +++ b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCMac.cpp @@ -486,7 +486,6 @@ public: } ObjCCommonTypesHelper(CodeGen::CodeGenModule &cgm); - ~ObjCCommonTypesHelper(){} }; /// ObjCTypesHelper - Helper class that encapsulates lazy @@ -595,7 +594,6 @@ public: public: ObjCTypesHelper(CodeGen::CodeGenModule &cgm); - ~ObjCTypesHelper() {} }; /// ObjCNonFragileABITypesHelper - will have all types needed by objective-c's @@ -733,7 +731,6 @@ public: llvm::Type *EHTypePtrTy; ObjCNonFragileABITypesHelper(CodeGen::CodeGenModule
&cgm); - ~ObjCNonFragileABITypesHelper(){} }; class CGObjCCommonMac : public CodeGen::CGObjCRuntime { @@ -1678,14 +1675,13 @@ struct NullReturnState { /// getConstantGEP() - Help routine to construct simple GEPs. static llvm::Constant *getConstantGEP(llvm::LLVMContext &VMContext, - llvm::Constant *C, - unsigned idx0, + llvm::GlobalVariable *C, unsigned idx0, unsigned idx1) { llvm::Value *Idxs[] = { llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), idx0), llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), idx1) }; - return llvm::ConstantExpr::getGetElementPtr(C, Idxs); + return llvm::ConstantExpr::getGetElementPtr(C->getValueType(), C, Idxs); } /// hasObjCExceptionAttribute - Return true if this class or any super @@ -1791,8 +1787,9 @@ CGObjCMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF, CGF.CreateTempAlloca(ObjCTypes.SuperTy, "objc_super"); llvm::Value *ReceiverAsObject = CGF.Builder.CreateBitCast(Receiver, ObjCTypes.ObjectPtrTy); - CGF.Builder.CreateStore(ReceiverAsObject, - CGF.Builder.CreateStructGEP(ObjCSuper, 0)); + CGF.Builder.CreateStore( + ReceiverAsObject, + CGF.Builder.CreateStructGEP(ObjCTypes.SuperTy, ObjCSuper, 0)); // If this is a class message the metaclass is passed as the target. llvm::Value *Target; @@ -1805,20 +1802,20 @@ CGObjCMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF, // the class's "isa" pointer. The following assumes that // "isa" is the first ivar in a class (which it must be). Target = EmitClassRef(CGF, Class->getSuperClass()); - Target = CGF.Builder.CreateStructGEP(Target, 0); + Target = CGF.Builder.CreateStructGEP(ObjCTypes.ClassTy, Target, 0); Target = CGF.Builder.CreateLoad(Target); } else { - llvm::Value *MetaClassPtr = EmitMetaClassRef(Class); - llvm::Value *SuperPtr = CGF.Builder.CreateStructGEP(MetaClassPtr, 1); + llvm::Constant *MetaClassPtr = EmitMetaClassRef(Class); + llvm::Value *SuperPtr = + CGF.Builder.CreateStructGEP(ObjCTypes.ClassTy, MetaClassPtr, 1); llvm::Value *Super = CGF.Builder.CreateLoad(SuperPtr); Target = Super; } - } - else if (isCategoryImpl) + } else if (isCategoryImpl) Target = EmitClassRef(CGF, Class->getSuperClass()); else { llvm::Value *ClassPtr = EmitSuperClassRef(Class); - ClassPtr = CGF.Builder.CreateStructGEP(ClassPtr, 1); + ClassPtr = CGF.Builder.CreateStructGEP(ObjCTypes.ClassTy, ClassPtr, 1); Target = CGF.Builder.CreateLoad(ClassPtr); } // FIXME: We shouldn't need to do this cast, rectify the ASTContext and @@ -1826,8 +1823,8 @@ CGObjCMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF, llvm::Type *ClassTy = CGM.getTypes().ConvertType(CGF.getContext().getObjCClassType()); Target = CGF.Builder.CreateBitCast(Target, ClassTy); - CGF.Builder.CreateStore(Target, - CGF.Builder.CreateStructGEP(ObjCSuper, 1)); + CGF.Builder.CreateStore( + Target, CGF.Builder.CreateStructGEP(ObjCTypes.SuperTy, ObjCSuper, 1)); return EmitMessageSend(CGF, Return, ResultType, EmitSelector(CGF, Sel), ObjCSuper, ObjCTypes.SuperPtrCTy, @@ -3810,15 +3807,16 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF, // Enter a try block: // - Call objc_exception_try_enter to push ExceptionData on top of // the EH stack. - CGF.EmitNounwindRuntimeCall(ObjCTypes.getExceptionTryEnterFn(), ExceptionData); + CGF.EmitNounwindRuntimeCall(ObjCTypes.getExceptionTryEnterFn(), + ExceptionData); // - Call setjmp on the exception data buffer.
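Context for the setjmp call emitted next: under this fragile-ABI scheme, objc_exception_try_enter pushes ExceptionData and a later throw longjmps back into its buffer, which is why the generated call is marked setCanReturnTwice() below. A hedged, plain-C++ sketch of that control flow; the ExceptionData layout and names here are invented, not the runtime's:

#include <csetjmp>
#include <cstdio>

struct ExceptionData {
  std::jmp_buf buffer; // what the setjmp_buffer GEP below points at
};

static void protectedBlock(ExceptionData *ed) {
  // setjmp returns 0 on the direct call; a later objc_exception_throw
  // longjmps back here with a non-zero value.
  if (setjmp(ed->buffer) == 0)
    std::puts("@try body runs here");
  else
    std::puts("exception raised: dispatch to @catch / @finally");
}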
llvm::Constant *Zero = llvm::ConstantInt::get(CGF.Builder.getInt32Ty(), 0); llvm::Value *GEPIndexes[] = { Zero, Zero, Zero }; - llvm::Value *SetJmpBuffer = - CGF.Builder.CreateGEP(ExceptionData, GEPIndexes, "setjmp_buffer"); - llvm::CallInst *SetJmpResult = - CGF.EmitNounwindRuntimeCall(ObjCTypes.getSetJmpFn(), SetJmpBuffer, "setjmp_result"); + llvm::Value *SetJmpBuffer = CGF.Builder.CreateGEP( + ObjCTypes.ExceptionDataTy, ExceptionData, GEPIndexes, "setjmp_buffer"); + llvm::CallInst *SetJmpResult = CGF.EmitNounwindRuntimeCall( + ObjCTypes.getSetJmpFn(), SetJmpBuffer, "setjmp_result"); SetJmpResult->setCanReturnTwice(); // If setjmp returned 0, enter the protected block; otherwise, @@ -5263,6 +5261,7 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul // const uint32_t size; // sizeof(struct _protocol_t) // const uint32_t flags; // = 0 // const char ** extendedMethodTypes; + // const char *demangledName; // } // Holder for struct _protocol_list_t * @@ -5275,6 +5274,7 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul MethodListnfABIPtrTy, MethodListnfABIPtrTy, MethodListnfABIPtrTy, MethodListnfABIPtrTy, PropertyListPtrTy, IntTy, IntTy, Int8PtrPtrTy, + Int8PtrTy, nullptr); // struct _protocol_t* @@ -6207,6 +6207,7 @@ llvm::Constant *CGObjCNonFragileABIMac::GetOrEmitProtocolRef( /// const uint32_t size; // sizeof(struct _protocol_t) /// const uint32_t flags; // = 0 /// const char ** extendedMethodTypes; +/// const char *demangledName; /// } /// @endcode /// @@ -6258,7 +6259,7 @@ llvm::Constant *CGObjCNonFragileABIMac::GetOrEmitProtocol( MethodTypesExt.insert(MethodTypesExt.end(), OptMethodTypesExt.begin(), OptMethodTypesExt.end()); - llvm::Constant *Values[11]; + llvm::Constant *Values[12]; // isa is NULL Values[0] = llvm::Constant::getNullValue(ObjCTypes.ObjectPtrTy); Values[1] = GetClassName(PD->getObjCRuntimeNameAsString()); @@ -6291,6 +6292,9 @@ llvm::Constant *CGObjCNonFragileABIMac::GetOrEmitProtocol( Values[10] = EmitProtocolMethodTypes("\01l_OBJC_$_PROTOCOL_METHOD_TYPES_" + PD->getObjCRuntimeNameAsString(), MethodTypesExt, ObjCTypes); + // const char *demangledName; + Values[11] = llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy); + llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ProtocolnfABITy, Values); @@ -6562,7 +6566,8 @@ CGObjCNonFragileABIMac::EmitVTableMessageSend(CodeGenFunction &CGF, args[1].RV = RValue::get(mref); // Load the function to call from the message ref table. - llvm::Value *callee = CGF.Builder.CreateStructGEP(mref, 0); + llvm::Value *callee = + CGF.Builder.CreateStructGEP(ObjCTypes.MessageRefTy, mref, 0); callee = CGF.Builder.CreateLoad(callee, "msgSend_fn"); callee = CGF.Builder.CreateBitCast(callee, MSI.MessengerType); @@ -6727,8 +6732,9 @@ CGObjCNonFragileABIMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF, llvm::Value *ReceiverAsObject = CGF.Builder.CreateBitCast(Receiver, ObjCTypes.ObjectPtrTy); - CGF.Builder.CreateStore(ReceiverAsObject, - CGF.Builder.CreateStructGEP(ObjCSuper, 0)); + CGF.Builder.CreateStore( + ReceiverAsObject, + CGF.Builder.CreateStructGEP(ObjCTypes.SuperTy, ObjCSuper, 0)); // If this is a class message the metaclass is passed as the target. 
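Note how the new demangledName field lands in two coordinated places above: Int8PtrTy is appended to the struct _protocol_t layout, and the initializer array grows from Values[11] to Values[12] in lockstep. A hedged two-field sketch of keeping a StructType and its ConstantStruct initializer in one-to-one correspondence (makeNamePair is an invented name):

#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Module.h"

static llvm::Constant *makeNamePair(llvm::Module &M, llvm::Constant *Name) {
  auto *I8Ptr = llvm::Type::getInt8PtrTy(M.getContext());
  auto *PairTy = llvm::StructType::get(M.getContext(), {I8Ptr, I8Ptr});
  llvm::Constant *Fields[] = {
      Name,                                 // const char *name;
      llvm::Constant::getNullValue(I8Ptr)}; // const char *demangledName;
  return llvm::ConstantStruct::get(PairTy, Fields); // fields match 1:1
}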
llvm::Value *Target; @@ -6742,8 +6748,8 @@ CGObjCNonFragileABIMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF, llvm::Type *ClassTy = CGM.getTypes().ConvertType(CGF.getContext().getObjCClassType()); Target = CGF.Builder.CreateBitCast(Target, ClassTy); - CGF.Builder.CreateStore(Target, - CGF.Builder.CreateStructGEP(ObjCSuper, 1)); + CGF.Builder.CreateStore( + Target, CGF.Builder.CreateStructGEP(ObjCTypes.SuperTy, ObjCSuper, 1)); return (isVTableDispatchedSelector(Sel)) ? EmitVTableMessageSend(CGF, Return, ResultType, Sel, @@ -6992,10 +6998,10 @@ CGObjCNonFragileABIMac::GetInterfaceEHType(const ObjCInterfaceDecl *ID, llvm::Value *VTableIdx = llvm::ConstantInt::get(CGM.Int32Ty, 2); llvm::Constant *Values[] = { - llvm::ConstantExpr::getGetElementPtr(VTableGV, VTableIdx), - GetClassName(ID->getObjCRuntimeNameAsString()), - GetClassGlobal(ClassName.str()) - }; + llvm::ConstantExpr::getGetElementPtr(VTableGV->getValueType(), VTableGV, + VTableIdx), + GetClassName(ID->getObjCRuntimeNameAsString()), + GetClassGlobal(ClassName.str())}; llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.EHTypeTy, Values); diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.cpp index 3d013da..5290a87 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.cpp +++ b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.cpp @@ -160,7 +160,7 @@ namespace { void Emit(CodeGenFunction &CGF, Flags flags) override { if (!MightThrow) { - CGF.Builder.CreateCall(Fn)->setDoesNotThrow(); + CGF.Builder.CreateCall(Fn, {})->setDoesNotThrow(); return; } diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGOpenMPRuntime.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGOpenMPRuntime.cpp index 22ee00f..1238acc 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CGOpenMPRuntime.cpp +++ b/contrib/llvm/tools/clang/lib/CodeGen/CGOpenMPRuntime.cpp @@ -13,6 +13,7 @@ #include "CGOpenMPRuntime.h" #include "CodeGenFunction.h" +#include "CGCleanup.h" #include "clang/AST/Decl.h" #include "clang/AST/StmtOpenMP.h" #include "llvm/ADT/ArrayRef.h" @@ -27,61 +28,236 @@ using namespace clang; using namespace CodeGen; namespace { -/// \brief API for captured statement code generation in OpenMP constructs. +/// \brief Base class for handling code generation inside OpenMP regions. class CGOpenMPRegionInfo : public CodeGenFunction::CGCapturedStmtInfo { public: - CGOpenMPRegionInfo(const OMPExecutableDirective &D, const CapturedStmt &CS, - const VarDecl *ThreadIDVar) - : CGCapturedStmtInfo(CS, CR_OpenMP), ThreadIDVar(ThreadIDVar), - Directive(D) { - assert(ThreadIDVar != nullptr && "No ThreadID in OpenMP region."); - } + /// \brief Kinds of OpenMP regions used in codegen. + enum CGOpenMPRegionKind { + /// \brief Region with outlined function for standalone 'parallel' + /// directive. + ParallelOutlinedRegion, + /// \brief Region with outlined function for standalone 'task' directive. + TaskOutlinedRegion, + /// \brief Region for constructs that do not require function outlining, + /// like 'for', 'sections', 'atomic' etc. directives. 
+ InlinedRegion, + }; + + CGOpenMPRegionInfo(const CapturedStmt &CS, + const CGOpenMPRegionKind RegionKind, + const RegionCodeGenTy &CodeGen) + : CGCapturedStmtInfo(CS, CR_OpenMP), RegionKind(RegionKind), + CodeGen(CodeGen) {} + + CGOpenMPRegionInfo(const CGOpenMPRegionKind RegionKind, + const RegionCodeGenTy &CodeGen) + : CGCapturedStmtInfo(CR_OpenMP), RegionKind(RegionKind), + CodeGen(CodeGen) {} - /// \brief Gets a variable or parameter for storing global thread id + /// \brief Get a variable or parameter for storing global thread id /// inside OpenMP construct. - const VarDecl *getThreadIDVariable() const { return ThreadIDVar; } + virtual const VarDecl *getThreadIDVariable() const = 0; - /// \brief Gets an LValue for the current ThreadID variable. - LValue getThreadIDVariableLValue(CodeGenFunction &CGF); + /// \brief Emit the captured statement body. + virtual void EmitBody(CodeGenFunction &CGF, const Stmt *S) override; + + /// \brief Get an LValue for the current ThreadID variable. + /// \return LValue for thread id variable. This LValue always has type int32*. + virtual LValue getThreadIDVariableLValue(CodeGenFunction &CGF); + + CGOpenMPRegionKind getRegionKind() const { return RegionKind; } static bool classof(const CGCapturedStmtInfo *Info) { return Info->getKind() == CR_OpenMP; } - /// \brief Emit the captured statement body. - void EmitBody(CodeGenFunction &CGF, Stmt *S) override; +protected: + CGOpenMPRegionKind RegionKind; + const RegionCodeGenTy &CodeGen; +}; + +/// \brief API for captured statement code generation in OpenMP constructs. +class CGOpenMPOutlinedRegionInfo : public CGOpenMPRegionInfo { +public: + CGOpenMPOutlinedRegionInfo(const CapturedStmt &CS, const VarDecl *ThreadIDVar, + const RegionCodeGenTy &CodeGen) + : CGOpenMPRegionInfo(CS, ParallelOutlinedRegion, CodeGen), + ThreadIDVar(ThreadIDVar) { + assert(ThreadIDVar != nullptr && "No ThreadID in OpenMP region."); + } + /// \brief Get a variable or parameter for storing global thread id + /// inside OpenMP construct. + const VarDecl *getThreadIDVariable() const override { return ThreadIDVar; } /// \brief Get the name of the capture helper. StringRef getHelperName() const override { return ".omp_outlined."; } + static bool classof(const CGCapturedStmtInfo *Info) { + return CGOpenMPRegionInfo::classof(Info) && + cast<CGOpenMPRegionInfo>(Info)->getRegionKind() == + ParallelOutlinedRegion; + } + private: /// \brief A variable or parameter storing global thread id for OpenMP /// constructs. const VarDecl *ThreadIDVar; - /// \brief OpenMP executable directive associated with the region. - const OMPExecutableDirective &Directive; }; + +/// \brief API for captured statement code generation in OpenMP constructs. +class CGOpenMPTaskOutlinedRegionInfo : public CGOpenMPRegionInfo { +public: + CGOpenMPTaskOutlinedRegionInfo(const CapturedStmt &CS, + const VarDecl *ThreadIDVar, + const RegionCodeGenTy &CodeGen) + : CGOpenMPRegionInfo(CS, TaskOutlinedRegion, CodeGen), + ThreadIDVar(ThreadIDVar) { + assert(ThreadIDVar != nullptr && "No ThreadID in OpenMP region."); + } + /// \brief Get a variable or parameter for storing global thread id + /// inside OpenMP construct. + const VarDecl *getThreadIDVariable() const override { return ThreadIDVar; } + + /// \brief Get an LValue for the current ThreadID variable. + LValue getThreadIDVariableLValue(CodeGenFunction &CGF) override; + + /// \brief Get the name of the capture helper. 
+ StringRef getHelperName() const override { return ".omp_outlined."; } + + static bool classof(const CGCapturedStmtInfo *Info) { + return CGOpenMPRegionInfo::classof(Info) && + cast<CGOpenMPRegionInfo>(Info)->getRegionKind() == + TaskOutlinedRegion; + } + +private: + /// \brief A variable or parameter storing global thread id for OpenMP + /// constructs. + const VarDecl *ThreadIDVar; +}; + +/// \brief API for inlined captured statement code generation in OpenMP +/// constructs. +class CGOpenMPInlinedRegionInfo : public CGOpenMPRegionInfo { +public: + CGOpenMPInlinedRegionInfo(CodeGenFunction::CGCapturedStmtInfo *OldCSI, + const RegionCodeGenTy &CodeGen) + : CGOpenMPRegionInfo(InlinedRegion, CodeGen), OldCSI(OldCSI), + OuterRegionInfo(dyn_cast_or_null<CGOpenMPRegionInfo>(OldCSI)) {} + // \brief Retrieve the value of the context parameter. + llvm::Value *getContextValue() const override { + if (OuterRegionInfo) + return OuterRegionInfo->getContextValue(); + llvm_unreachable("No context value for inlined OpenMP region"); + } + virtual void setContextValue(llvm::Value *V) override { + if (OuterRegionInfo) { + OuterRegionInfo->setContextValue(V); + return; + } + llvm_unreachable("No context value for inlined OpenMP region"); + } + /// \brief Lookup the captured field decl for a variable. + const FieldDecl *lookup(const VarDecl *VD) const override { + if (OuterRegionInfo) + return OuterRegionInfo->lookup(VD); + // If there is no outer outlined region, no need to look up in a list of + // captured variables, we can use the original one. + return nullptr; + } + FieldDecl *getThisFieldDecl() const override { + if (OuterRegionInfo) + return OuterRegionInfo->getThisFieldDecl(); + return nullptr; + } + /// \brief Get a variable or parameter for storing global thread id + /// inside OpenMP construct. + const VarDecl *getThreadIDVariable() const override { + if (OuterRegionInfo) + return OuterRegionInfo->getThreadIDVariable(); + return nullptr; + } + + /// \brief Get the name of the capture helper. + StringRef getHelperName() const override { + if (auto *OuterRegionInfo = getOldCSI()) + return OuterRegionInfo->getHelperName(); + llvm_unreachable("No helper name for inlined OpenMP construct"); + } + + CodeGenFunction::CGCapturedStmtInfo *getOldCSI() const { return OldCSI; } + + static bool classof(const CGCapturedStmtInfo *Info) { + return CGOpenMPRegionInfo::classof(Info) && + cast<CGOpenMPRegionInfo>(Info)->getRegionKind() == InlinedRegion; + } + +private: + /// \brief CodeGen info about outer OpenMP region. + CodeGenFunction::CGCapturedStmtInfo *OldCSI; + CGOpenMPRegionInfo *OuterRegionInfo; +}; + +/// \brief RAII for emitting code of OpenMP constructs. +class InlinedOpenMPRegionRAII { + CodeGenFunction &CGF; + +public: + /// \brief Constructs region for combined constructs. + /// \param CodeGen Code generation sequence for combined directives. Includes + /// a list of functions used for code generation of implicitly inlined + /// regions. + InlinedOpenMPRegionRAII(CodeGenFunction &CGF, const RegionCodeGenTy &CodeGen) + : CGF(CGF) { + // Start emission for the construct. + CGF.CapturedStmtInfo = + new CGOpenMPInlinedRegionInfo(CGF.CapturedStmtInfo, CodeGen); + } + ~InlinedOpenMPRegionRAII() { + // Restore original CapturedStmtInfo only if we're done with code emission.
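InlinedOpenMPRegionRAII's constructor above installs a fresh CGOpenMPInlinedRegionInfo over CGF.CapturedStmtInfo; the destructor body that follows deletes it and puts the saved pointer back. A simplified standalone model of that ownership discipline (all names invented; the real guard also threads the RegionCodeGenTy through):

struct RegionInfo {
  virtual ~RegionInfo() = default;
};

class ScopedRegionSwap {
  RegionInfo *&Slot; // e.g. CGF.CapturedStmtInfo
  RegionInfo *Saved; // restored on scope exit

public:
  ScopedRegionSwap(RegionInfo *&Slot, RegionInfo *Temporary)
      : Slot(Slot), Saved(Slot) {
    Slot = Temporary; // install the temporary info for the region's emission
  }
  ~ScopedRegionSwap() {
    delete Slot; // the guard owns the temporary it installed
    Slot = Saved;
  }
};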
+ auto *OldCSI = + cast<CGOpenMPInlinedRegionInfo>(CGF.CapturedStmtInfo)->getOldCSI(); + delete CGF.CapturedStmtInfo; + CGF.CapturedStmtInfo = OldCSI; + } +}; + } // namespace LValue CGOpenMPRegionInfo::getThreadIDVariableLValue(CodeGenFunction &CGF) { return CGF.MakeNaturalAlignAddrLValue( - CGF.GetAddrOfLocalVar(ThreadIDVar), - CGF.getContext().getPointerType(ThreadIDVar->getType())); + CGF.Builder.CreateAlignedLoad( + CGF.GetAddrOfLocalVar(getThreadIDVariable()), + CGF.PointerAlignInBytes), + getThreadIDVariable() + ->getType() + ->castAs<PointerType>() + ->getPointeeType()); } -void CGOpenMPRegionInfo::EmitBody(CodeGenFunction &CGF, Stmt *S) { - CodeGenFunction::OMPPrivateScope PrivateScope(CGF); - CGF.EmitOMPPrivateClause(Directive, PrivateScope); - CGF.EmitOMPFirstprivateClause(Directive, PrivateScope); - if (PrivateScope.Privatize()) - // Emit implicit barrier to synchronize threads and avoid data races. - CGF.CGM.getOpenMPRuntime().EmitOMPBarrierCall(CGF, Directive.getLocStart(), - /*IsExplicit=*/false); - CGCapturedStmtInfo::EmitBody(CGF, S); +void CGOpenMPRegionInfo::EmitBody(CodeGenFunction &CGF, const Stmt * /*S*/) { + // 1.2.2 OpenMP Language Terminology + // Structured block - An executable statement with a single entry at the + // top and a single exit at the bottom. + // The point of exit cannot be a branch out of the structured block. + // longjmp() and throw() must not violate the entry/exit criteria. + CGF.EHStack.pushTerminate(); + { + CodeGenFunction::RunCleanupsScope Scope(CGF); + CodeGen(CGF); + } + CGF.EHStack.popTerminate(); +} + +LValue CGOpenMPTaskOutlinedRegionInfo::getThreadIDVariableLValue( + CodeGenFunction &CGF) { + return CGF.MakeNaturalAlignAddrLValue( + CGF.GetAddrOfLocalVar(getThreadIDVariable()), + getThreadIDVariable()->getType()); } CGOpenMPRuntime::CGOpenMPRuntime(CodeGenModule &CGM) - : CGM(CGM), DefaultOpenMPPSource(nullptr) { + : CGM(CGM), DefaultOpenMPPSource(nullptr), KmpRoutineEntryPtrTy(nullptr) { IdentTy = llvm::StructType::create( "ident_t", CGM.Int32Ty /* reserved_1 */, CGM.Int32Ty /* flags */, CGM.Int32Ty /* reserved_2 */, CGM.Int32Ty /* reserved_3 */, @@ -93,18 +269,38 @@ CGOpenMPRuntime::CGOpenMPRuntime(CodeGenModule &CGM) KmpCriticalNameTy = llvm::ArrayType::get(CGM.Int32Ty, /*NumElements*/ 8); } +void CGOpenMPRuntime::clear() { + InternalVars.clear(); +} + llvm::Value * -CGOpenMPRuntime::EmitOpenMPOutlinedFunction(const OMPExecutableDirective &D, - const VarDecl *ThreadIDVar) { +CGOpenMPRuntime::emitParallelOutlinedFunction(const OMPExecutableDirective &D, + const VarDecl *ThreadIDVar, + const RegionCodeGenTy &CodeGen) { + assert(ThreadIDVar->getType()->isPointerType() && + "thread id variable must be of type kmp_int32 *"); const CapturedStmt *CS = cast<CapturedStmt>(D.getAssociatedStmt()); CodeGenFunction CGF(CGM, true); - CGOpenMPRegionInfo CGInfo(D, *CS, ThreadIDVar); + CGOpenMPOutlinedRegionInfo CGInfo(*CS, ThreadIDVar, CodeGen); CGF.CapturedStmtInfo = &CGInfo; return CGF.GenerateCapturedStmtFunction(*CS); } llvm::Value * -CGOpenMPRuntime::GetOrCreateDefaultOpenMPLocation(OpenMPLocationFlags Flags) { +CGOpenMPRuntime::emitTaskOutlinedFunction(const OMPExecutableDirective &D, + const VarDecl *ThreadIDVar, + const RegionCodeGenTy &CodeGen) { + assert(!ThreadIDVar->getType()->isPointerType() && + "thread id variable must be of type kmp_int32 for tasks"); + auto *CS = cast<CapturedStmt>(D.getAssociatedStmt()); + CodeGenFunction CGF(CGM, true); + CGOpenMPTaskOutlinedRegionInfo CGInfo(*CS, ThreadIDVar, CodeGen); + CGF.CapturedStmtInfo = 
&CGInfo; + return CGF.GenerateCapturedStmtFunction(*CS); +} + +llvm::Value * +CGOpenMPRuntime::getOrCreateDefaultLocation(OpenMPLocationFlags Flags) { llvm::Value *Entry = OpenMPDefaultLocMap.lookup(Flags); if (!Entry) { if (!DefaultOpenMPPSource) { @@ -134,12 +330,13 @@ CGOpenMPRuntime::GetOrCreateDefaultOpenMPLocation(OpenMPLocationFlags Flags) { return Entry; } -llvm::Value *CGOpenMPRuntime::EmitOpenMPUpdateLocation( - CodeGenFunction &CGF, SourceLocation Loc, OpenMPLocationFlags Flags) { +llvm::Value *CGOpenMPRuntime::emitUpdateLocation(CodeGenFunction &CGF, + SourceLocation Loc, + OpenMPLocationFlags Flags) { // If no debug info is generated - return global default location. if (CGM.getCodeGenOpts().getDebugInfo() == CodeGenOptions::NoDebugInfo || Loc.isInvalid()) - return GetOrCreateDefaultOpenMPLocation(Flags); + return getOrCreateDefaultLocation(Flags); assert(CGF.CurFn && "No function in current CodeGenFunction."); @@ -159,14 +356,14 @@ llvm::Value *CGOpenMPRuntime::EmitOpenMPUpdateLocation( CGBuilderTy::InsertPointGuard IPG(CGF.Builder); CGF.Builder.SetInsertPoint(CGF.AllocaInsertPt); - CGF.Builder.CreateMemCpy(LocValue, GetOrCreateDefaultOpenMPLocation(Flags), + CGF.Builder.CreateMemCpy(LocValue, getOrCreateDefaultLocation(Flags), llvm::ConstantExpr::getSizeOf(IdentTy), CGM.PointerAlignInBytes); } // char **psource = &.kmpc_loc_<flags>.addr.psource; - auto *PSource = - CGF.Builder.CreateConstInBoundsGEP2_32(LocValue, 0, IdentField_PSource); + auto *PSource = CGF.Builder.CreateConstInBoundsGEP2_32(IdentTy, LocValue, 0, + IdentField_PSource); auto OMPDebugLoc = OpenMPDebugLocMap.lookup(Loc.getRawEncoding()); if (OMPDebugLoc == nullptr) { @@ -189,8 +386,8 @@ llvm::Value *CGOpenMPRuntime::EmitOpenMPUpdateLocation( return LocValue; } -llvm::Value *CGOpenMPRuntime::GetOpenMPThreadID(CodeGenFunction &CGF, - SourceLocation Loc) { +llvm::Value *CGOpenMPRuntime::getThreadID(CodeGenFunction &CGF, + SourceLocation Loc) { assert(CGF.CurFn && "No function in current CodeGenFunction."); llvm::Value *ThreadID = nullptr; @@ -204,36 +401,35 @@ llvm::Value *CGOpenMPRuntime::GetOpenMPThreadID(CodeGenFunction &CGF, } if (auto OMPRegionInfo = dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) { - // Check if this is an outlined function with thread id passed as argument. - auto ThreadIDVar = OMPRegionInfo->getThreadIDVariable(); - auto LVal = OMPRegionInfo->getThreadIDVariableLValue(CGF); - auto RVal = CGF.EmitLoadOfLValue(LVal, Loc); - LVal = CGF.MakeNaturalAlignAddrLValue(RVal.getScalarVal(), - ThreadIDVar->getType()); - ThreadID = CGF.EmitLoadOfLValue(LVal, Loc).getScalarVal(); - // If value loaded in entry block, cache it and use it everywhere in - // function. - if (CGF.Builder.GetInsertBlock() == CGF.AllocaInsertPt->getParent()) { - auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn); - Elem.second.ThreadID = ThreadID; + if (OMPRegionInfo->getThreadIDVariable()) { + // Check if this is an outlined function with thread id passed as argument. + auto LVal = OMPRegionInfo->getThreadIDVariableLValue(CGF); + ThreadID = CGF.EmitLoadOfLValue(LVal, Loc).getScalarVal(); + // If value loaded in entry block, cache it and use it everywhere in + // function. + if (CGF.Builder.GetInsertBlock() == CGF.AllocaInsertPt->getParent()) { + auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn); + Elem.second.ThreadID = ThreadID; + } + return ThreadID; } - } else { - // This is not an outlined function region - need to call __kmpc_int32 - // kmpc_global_thread_num(ident_t *loc).
- // Generate thread id value and cache this value for use across the - // function. - CGBuilderTy::InsertPointGuard IPG(CGF.Builder); - CGF.Builder.SetInsertPoint(CGF.AllocaInsertPt); - llvm::Value *Args[] = {EmitOpenMPUpdateLocation(CGF, Loc)}; - ThreadID = CGF.EmitRuntimeCall( - CreateRuntimeFunction(OMPRTL__kmpc_global_thread_num), Args); - auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn); - Elem.second.ThreadID = ThreadID; } + + // This is not an outlined function region - need to call __kmpc_int32 + // kmpc_global_thread_num(ident_t *loc). + // Generate thread id value and cache this value for use across the + // function. + CGBuilderTy::InsertPointGuard IPG(CGF.Builder); + CGF.Builder.SetInsertPoint(CGF.AllocaInsertPt); + ThreadID = + CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_global_thread_num), + emitUpdateLocation(CGF, Loc)); + auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn); + Elem.second.ThreadID = ThreadID; return ThreadID; } -void CGOpenMPRuntime::FunctionFinished(CodeGenFunction &CGF) { +void CGOpenMPRuntime::functionFinished(CodeGenFunction &CGF) { assert(CGF.CurFn && "No function in current CodeGenFunction."); if (OpenMPLocThreadIDMap.count(CGF.CurFn)) OpenMPLocThreadIDMap.erase(CGF.CurFn); @@ -248,7 +444,7 @@ llvm::Type *CGOpenMPRuntime::getKmpc_MicroPointerTy() { } llvm::Constant * -CGOpenMPRuntime::CreateRuntimeFunction(OpenMPRTLFunction Function) { +CGOpenMPRuntime::createRuntimeFunction(OpenMPRTLFunction Function) { llvm::Constant *RTLFn = nullptr; switch (Function) { case OMPRTL__kmpc_fork_call: { @@ -334,87 +530,6 @@ CGOpenMPRuntime::CreateRuntimeFunction(OpenMPRTLFunction Function) { RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name*/ "__kmpc_cancel_barrier"); break; } - // Build __kmpc_for_static_init*( - // ident_t *loc, kmp_int32 tid, kmp_int32 schedtype, - // kmp_int32 *p_lastiter, kmp_int[32|64] *p_lower, - // kmp_int[32|64] *p_upper, kmp_int[32|64] *p_stride, - // kmp_int[32|64] incr, kmp_int[32|64] chunk); - case OMPRTL__kmpc_for_static_init_4: { - auto ITy = CGM.Int32Ty; - auto PtrTy = llvm::PointerType::getUnqual(ITy); - llvm::Type *TypeParams[] = { - getIdentTyPointerTy(), // loc - CGM.Int32Ty, // tid - CGM.Int32Ty, // schedtype - llvm::PointerType::getUnqual(CGM.Int32Ty), // p_lastiter - PtrTy, // p_lower - PtrTy, // p_upper - PtrTy, // p_stride - ITy, // incr - ITy // chunk - }; - llvm::FunctionType *FnTy = - llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false); - RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_for_static_init_4"); - break; - } - case OMPRTL__kmpc_for_static_init_4u: { - auto ITy = CGM.Int32Ty; - auto PtrTy = llvm::PointerType::getUnqual(ITy); - llvm::Type *TypeParams[] = { - getIdentTyPointerTy(), // loc - CGM.Int32Ty, // tid - CGM.Int32Ty, // schedtype - llvm::PointerType::getUnqual(CGM.Int32Ty), // p_lastiter - PtrTy, // p_lower - PtrTy, // p_upper - PtrTy, // p_stride - ITy, // incr - ITy // chunk - }; - llvm::FunctionType *FnTy = - llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false); - RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_for_static_init_4u"); - break; - } - case OMPRTL__kmpc_for_static_init_8: { - auto ITy = CGM.Int64Ty; - auto PtrTy = llvm::PointerType::getUnqual(ITy); - llvm::Type *TypeParams[] = { - getIdentTyPointerTy(), // loc - CGM.Int32Ty, // tid - CGM.Int32Ty, // schedtype - llvm::PointerType::getUnqual(CGM.Int32Ty), // p_lastiter - PtrTy, // p_lower - PtrTy, // p_upper - PtrTy, // p_stride - ITy, // incr - ITy // chunk - }; - llvm::FunctionType 
*FnTy = - llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false); - RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_for_static_init_8"); - break; - } - case OMPRTL__kmpc_for_static_init_8u: { - auto ITy = CGM.Int64Ty; - auto PtrTy = llvm::PointerType::getUnqual(ITy); - llvm::Type *TypeParams[] = { - getIdentTyPointerTy(), // loc - CGM.Int32Ty, // tid - CGM.Int32Ty, // schedtype - llvm::PointerType::getUnqual(CGM.Int32Ty), // p_lastiter - PtrTy, // p_lower - PtrTy, // p_upper - PtrTy, // p_stride - ITy, // incr - ITy // chunk - }; - llvm::FunctionType *FnTy = - llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false); - RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_for_static_init_8u"); - break; - } case OMPRTL__kmpc_for_static_fini: { // Build void __kmpc_for_static_fini(ident_t *loc, kmp_int32 global_tid); llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty}; @@ -452,10 +567,10 @@ CGOpenMPRuntime::CreateRuntimeFunction(OpenMPRTLFunction Function) { break; } case OMPRTL__kmpc_flush: { - // Build void __kmpc_flush(ident_t *loc, ...); + // Build void __kmpc_flush(ident_t *loc); llvm::Type *TypeParams[] = {getIdentTyPointerTy()}; llvm::FunctionType *FnTy = - llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ true); + llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false); RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_flush"); break; } @@ -475,38 +590,291 @@ CGOpenMPRuntime::CreateRuntimeFunction(OpenMPRTLFunction Function) { RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_master"); break; } + case OMPRTL__kmpc_omp_taskyield: { + // Build kmp_int32 __kmpc_omp_taskyield(ident_t *, kmp_int32 global_tid, + // int end_part); + llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy}; + llvm::FunctionType *FnTy = + llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false); + RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_taskyield"); + break; + } + case OMPRTL__kmpc_single: { + // Build kmp_int32 __kmpc_single(ident_t *loc, kmp_int32 global_tid); + llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty}; + llvm::FunctionType *FnTy = + llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false); + RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_single"); + break; + } + case OMPRTL__kmpc_end_single: { + // Build void __kmpc_end_single(ident_t *loc, kmp_int32 global_tid); + llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty}; + llvm::FunctionType *FnTy = + llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false); + RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_single"); + break; + } + case OMPRTL__kmpc_omp_task_alloc: { + // Build kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid, + // kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds, + // kmp_routine_entry_t *task_entry); + assert(KmpRoutineEntryPtrTy != nullptr && + "Type kmp_routine_entry_t must be created."); + llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty, + CGM.SizeTy, CGM.SizeTy, KmpRoutineEntryPtrTy}; + // Return void * and then cast to particular kmp_task_t type. 
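Every case in this switch follows the same shape: spell the runtime entry point's C signature as an llvm::FunctionType, then request a matching declaration. A sketch of that pattern using plain llvm::Module::getOrInsertFunction in place of CGM.CreateRuntimeFunction, with IdentPtrTy standing in for getIdentTyPointerTy():

#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Module.h"

// Declare void __kmpc_barrier(ident_t *loc, kmp_int32 global_tid);
static llvm::Constant *declareKmpcBarrier(llvm::Module &M,
                                          llvm::Type *IdentPtrTy) {
  auto &Ctx = M.getContext();
  llvm::Type *Params[] = {IdentPtrTy, llvm::Type::getInt32Ty(Ctx)};
  auto *FnTy = llvm::FunctionType::get(llvm::Type::getVoidTy(Ctx), Params,
                                       /*isVarArg=*/false);
  return M.getOrInsertFunction("__kmpc_barrier", FnTy);
}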
+ llvm::FunctionType *FnTy = + llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false); + RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task_alloc"); + break; + } + case OMPRTL__kmpc_omp_task: { + // Build kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t + // *new_task); + llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, + CGM.VoidPtrTy}; + llvm::FunctionType *FnTy = + llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false); + RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task"); + break; + } + case OMPRTL__kmpc_copyprivate: { + // Build void __kmpc_copyprivate(ident_t *loc, kmp_int32 global_tid, + // size_t cpy_size, void *cpy_data, void(*cpy_func)(void *, void *), + // kmp_int32 didit); + llvm::Type *CpyTypeParams[] = {CGM.VoidPtrTy, CGM.VoidPtrTy}; + auto *CpyFnTy = + llvm::FunctionType::get(CGM.VoidTy, CpyTypeParams, /*isVarArg=*/false); + llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.SizeTy, + CGM.VoidPtrTy, CpyFnTy->getPointerTo(), + CGM.Int32Ty}; + llvm::FunctionType *FnTy = + llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false); + RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_copyprivate"); + break; + } + case OMPRTL__kmpc_reduce: { + // Build kmp_int32 __kmpc_reduce(ident_t *loc, kmp_int32 global_tid, + // kmp_int32 num_vars, size_t reduce_size, void *reduce_data, void + // (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name *lck); + llvm::Type *ReduceTypeParams[] = {CGM.VoidPtrTy, CGM.VoidPtrTy}; + auto *ReduceFnTy = llvm::FunctionType::get(CGM.VoidTy, ReduceTypeParams, + /*isVarArg=*/false); + llvm::Type *TypeParams[] = { + getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty, CGM.SizeTy, + CGM.VoidPtrTy, ReduceFnTy->getPointerTo(), + llvm::PointerType::getUnqual(KmpCriticalNameTy)}; + llvm::FunctionType *FnTy = + llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false); + RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_reduce"); + break; + } + case OMPRTL__kmpc_reduce_nowait: { + // Build kmp_int32 __kmpc_reduce_nowait(ident_t *loc, kmp_int32 + // global_tid, kmp_int32 num_vars, size_t reduce_size, void *reduce_data, + // void (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name + // *lck); + llvm::Type *ReduceTypeParams[] = {CGM.VoidPtrTy, CGM.VoidPtrTy}; + auto *ReduceFnTy = llvm::FunctionType::get(CGM.VoidTy, ReduceTypeParams, + /*isVarArg=*/false); + llvm::Type *TypeParams[] = { + getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty, CGM.SizeTy, + CGM.VoidPtrTy, ReduceFnTy->getPointerTo(), + llvm::PointerType::getUnqual(KmpCriticalNameTy)}; + llvm::FunctionType *FnTy = + llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false); + RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_reduce_nowait"); + break; + } + case OMPRTL__kmpc_end_reduce: { + // Build void __kmpc_end_reduce(ident_t *loc, kmp_int32 global_tid, + // kmp_critical_name *lck); + llvm::Type *TypeParams[] = { + getIdentTyPointerTy(), CGM.Int32Ty, + llvm::PointerType::getUnqual(KmpCriticalNameTy)}; + llvm::FunctionType *FnTy = + llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false); + RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_reduce"); + break; + } + case OMPRTL__kmpc_end_reduce_nowait: { + // Build __kmpc_end_reduce_nowait(ident_t *loc, kmp_int32 global_tid, + // kmp_critical_name *lck); + llvm::Type *TypeParams[] = { + getIdentTyPointerTy(), CGM.Int32Ty, + 
llvm::PointerType::getUnqual(KmpCriticalNameTy)}; + llvm::FunctionType *FnTy = + llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false); + RTLFn = + CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_reduce_nowait"); + break; + } + case OMPRTL__kmpc_omp_task_begin_if0: { + // Build void __kmpc_omp_task_begin_if0(ident_t *, kmp_int32 gtid, kmp_task_t + // *new_task); + llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, + CGM.VoidPtrTy}; + llvm::FunctionType *FnTy = + llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false); + RTLFn = + CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task_begin_if0"); + break; + } + case OMPRTL__kmpc_omp_task_complete_if0: { + // Build void __kmpc_omp_task_complete_if0(ident_t *, kmp_int32 gtid, kmp_task_t + // *new_task); + llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, + CGM.VoidPtrTy}; + llvm::FunctionType *FnTy = + llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false); + RTLFn = CGM.CreateRuntimeFunction(FnTy, + /*Name=*/"__kmpc_omp_task_complete_if0"); + break; + } + case OMPRTL__kmpc_ordered: { + // Build void __kmpc_ordered(ident_t *loc, kmp_int32 global_tid); + llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty}; + llvm::FunctionType *FnTy = + llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false); + RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_ordered"); + break; + } + case OMPRTL__kmpc_end_ordered: { + // Build void __kmpc_end_ordered(ident_t *loc, kmp_int32 global_tid); + llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty}; + llvm::FunctionType *FnTy = + llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false); + RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_ordered"); + break; + } + case OMPRTL__kmpc_omp_taskwait: { + // Build kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32 global_tid); + llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty}; + llvm::FunctionType *FnTy = + llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false); + RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_omp_taskwait"); + break; + } } return RTLFn; } +llvm::Constant *CGOpenMPRuntime::createForStaticInitFunction(unsigned IVSize, + bool IVSigned) { + assert((IVSize == 32 || IVSize == 64) && + "IV size is not compatible with the omp runtime"); + auto Name = IVSize == 32 ? (IVSigned ? "__kmpc_for_static_init_4" + : "__kmpc_for_static_init_4u") + : (IVSigned ? "__kmpc_for_static_init_8" + : "__kmpc_for_static_init_8u"); + auto ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty; + auto PtrTy = llvm::PointerType::getUnqual(ITy); + llvm::Type *TypeParams[] = { + getIdentTyPointerTy(), // loc + CGM.Int32Ty, // tid + CGM.Int32Ty, // schedtype + llvm::PointerType::getUnqual(CGM.Int32Ty), // p_lastiter + PtrTy, // p_lower + PtrTy, // p_upper + PtrTy, // p_stride + ITy, // incr + ITy // chunk + }; + llvm::FunctionType *FnTy = + llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false); + return CGM.CreateRuntimeFunction(FnTy, Name); +} + +llvm::Constant *CGOpenMPRuntime::createDispatchInitFunction(unsigned IVSize, + bool IVSigned) { + assert((IVSize == 32 || IVSize == 64) && + "IV size is not compatible with the omp runtime"); + auto Name = + IVSize == 32 + ? (IVSigned ? "__kmpc_dispatch_init_4" : "__kmpc_dispatch_init_4u") + : (IVSigned ? "__kmpc_dispatch_init_8" : "__kmpc_dispatch_init_8u"); + auto ITy = IVSize == 32 ?
CGM.Int32Ty : CGM.Int64Ty; + llvm::Type *TypeParams[] = { getIdentTyPointerTy(), // loc + CGM.Int32Ty, // tid + CGM.Int32Ty, // schedtype + ITy, // lower + ITy, // upper + ITy, // stride + ITy // chunk + }; + llvm::FunctionType *FnTy = + llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false); + return CGM.CreateRuntimeFunction(FnTy, Name); +} + +llvm::Constant *CGOpenMPRuntime::createDispatchFiniFunction(unsigned IVSize, + bool IVSigned) { + assert((IVSize == 32 || IVSize == 64) && + "IV size is not compatible with the omp runtime"); + auto Name = + IVSize == 32 + ? (IVSigned ? "__kmpc_dispatch_fini_4" : "__kmpc_dispatch_fini_4u") + : (IVSigned ? "__kmpc_dispatch_fini_8" : "__kmpc_dispatch_fini_8u"); + llvm::Type *TypeParams[] = { + getIdentTyPointerTy(), // loc + CGM.Int32Ty, // tid + }; + llvm::FunctionType *FnTy = + llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false); + return CGM.CreateRuntimeFunction(FnTy, Name); +} + +llvm::Constant *CGOpenMPRuntime::createDispatchNextFunction(unsigned IVSize, + bool IVSigned) { + assert((IVSize == 32 || IVSize == 64) && + "IV size is not compatible with the omp runtime"); + auto Name = + IVSize == 32 + ? (IVSigned ? "__kmpc_dispatch_next_4" : "__kmpc_dispatch_next_4u") + : (IVSigned ? "__kmpc_dispatch_next_8" : "__kmpc_dispatch_next_8u"); + auto ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty; + auto PtrTy = llvm::PointerType::getUnqual(ITy); + llvm::Type *TypeParams[] = { + getIdentTyPointerTy(), // loc + CGM.Int32Ty, // tid + llvm::PointerType::getUnqual(CGM.Int32Ty), // p_lastiter + PtrTy, // p_lower + PtrTy, // p_upper + PtrTy // p_stride + }; + llvm::FunctionType *FnTy = + llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false); + return CGM.CreateRuntimeFunction(FnTy, Name); +} + llvm::Constant * CGOpenMPRuntime::getOrCreateThreadPrivateCache(const VarDecl *VD) { // Lookup the entry, lazily creating it if necessary. - return GetOrCreateInternalVariable(CGM.Int8PtrPtrTy, + return getOrCreateInternalVariable(CGM.Int8PtrPtrTy, Twine(CGM.getMangledName(VD)) + ".cache."); } -llvm::Value *CGOpenMPRuntime::getOMPAddrOfThreadPrivate(CodeGenFunction &CGF, - const VarDecl *VD, - llvm::Value *VDAddr, - SourceLocation Loc) { +llvm::Value *CGOpenMPRuntime::getAddrOfThreadPrivate(CodeGenFunction &CGF, + const VarDecl *VD, + llvm::Value *VDAddr, + SourceLocation Loc) { auto VarTy = VDAddr->getType()->getPointerElementType(); - llvm::Value *Args[] = {EmitOpenMPUpdateLocation(CGF, Loc), - GetOpenMPThreadID(CGF, Loc), + llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc), CGF.Builder.CreatePointerCast(VDAddr, CGM.Int8PtrTy), CGM.getSize(CGM.GetTargetTypeStoreSize(VarTy)), getOrCreateThreadPrivateCache(VD)}; return CGF.EmitRuntimeCall( - CreateRuntimeFunction(OMPRTL__kmpc_threadprivate_cached), Args); + createRuntimeFunction(OMPRTL__kmpc_threadprivate_cached), Args); } -void CGOpenMPRuntime::EmitOMPThreadPrivateVarInit( +void CGOpenMPRuntime::emitThreadPrivateVarInit( CodeGenFunction &CGF, llvm::Value *VDAddr, llvm::Value *Ctor, llvm::Value *CopyCtor, llvm::Value *Dtor, SourceLocation Loc) { // Call kmp_int32 __kmpc_global_thread_num(&loc) to init OpenMP runtime // library. 
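At the source level, the registration performed by emitThreadPrivateVarInit corresponds to a threadprivate variable with a non-trivial constructor and destructor, roughly like this sketch (Counter and gCounter are invented names):

#include <cstdio>

struct Counter {
  Counter() : value(0) { std::puts("per-thread init"); } // registered ctor
  ~Counter() { std::puts("per-thread teardown"); }       // registered dtor
  int value;
};
static Counter gCounter;
#pragma omp threadprivate(gCounter)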
- auto OMPLoc = EmitOpenMPUpdateLocation(CGF, Loc); - CGF.EmitRuntimeCall(CreateRuntimeFunction(OMPRTL__kmpc_global_thread_num), + auto OMPLoc = emitUpdateLocation(CGF, Loc); + CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_global_thread_num), OMPLoc); // Call __kmpc_threadprivate_register(&loc, &var, ctor, cctor/*NULL*/, dtor) // to register constructor/destructor for variable. @@ -514,10 +882,10 @@ void CGOpenMPRuntime::EmitOMPThreadPrivateVarInit( CGF.Builder.CreatePointerCast(VDAddr, CGM.VoidPtrTy), Ctor, CopyCtor, Dtor}; CGF.EmitRuntimeCall( - CreateRuntimeFunction(OMPRTL__kmpc_threadprivate_register), Args); + createRuntimeFunction(OMPRTL__kmpc_threadprivate_register), Args); } -llvm::Function *CGOpenMPRuntime::EmitOMPThreadPrivateVarDefinition( +llvm::Function *CGOpenMPRuntime::emitThreadPrivateVarDefinition( const VarDecl *VD, llvm::Value *VDAddr, SourceLocation Loc, bool PerformInit, CodeGenFunction *CGF) { VD = VD->getDefinition(CGM.getContext()); @@ -620,54 +988,121 @@ llvm::Function *CGOpenMPRuntime::EmitOMPThreadPrivateVarDefinition( InitCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, InitFunction, CGM.getTypes().arrangeNullaryFunction(), ArgList, Loc); - EmitOMPThreadPrivateVarInit(InitCGF, VDAddr, Ctor, CopyCtor, Dtor, Loc); + emitThreadPrivateVarInit(InitCGF, VDAddr, Ctor, CopyCtor, Dtor, Loc); InitCGF.FinishFunction(); return InitFunction; } - EmitOMPThreadPrivateVarInit(*CGF, VDAddr, Ctor, CopyCtor, Dtor, Loc); + emitThreadPrivateVarInit(*CGF, VDAddr, Ctor, CopyCtor, Dtor, Loc); } return nullptr; } -void CGOpenMPRuntime::EmitOMPParallelCall(CodeGenFunction &CGF, - SourceLocation Loc, - llvm::Value *OutlinedFn, - llvm::Value *CapturedStruct) { - // Build call __kmpc_fork_call(loc, 1, microtask, captured_struct/*context*/) - llvm::Value *Args[] = { - EmitOpenMPUpdateLocation(CGF, Loc), - CGF.Builder.getInt32(1), // Number of arguments after 'microtask' argument - // (there is only one additional argument - 'context') - CGF.Builder.CreateBitCast(OutlinedFn, getKmpc_MicroPointerTy()), - CGF.EmitCastToVoidPtr(CapturedStruct)}; - auto RTLFn = CreateRuntimeFunction(OMPRTL__kmpc_fork_call); - CGF.EmitRuntimeCall(RTLFn, Args); -} - -void CGOpenMPRuntime::EmitOMPSerialCall(CodeGenFunction &CGF, - SourceLocation Loc, - llvm::Value *OutlinedFn, - llvm::Value *CapturedStruct) { - auto ThreadID = GetOpenMPThreadID(CGF, Loc); - // Build calls: - // __kmpc_serialized_parallel(&Loc, GTid); - llvm::Value *SerArgs[] = {EmitOpenMPUpdateLocation(CGF, Loc), ThreadID}; - auto RTLFn = CreateRuntimeFunction(OMPRTL__kmpc_serialized_parallel); - CGF.EmitRuntimeCall(RTLFn, SerArgs); - - // OutlinedFn(&GTid, &zero, CapturedStruct); - auto ThreadIDAddr = EmitThreadIDAddress(CGF, Loc); - auto Int32Ty = - CGF.getContext().getIntTypeForBitwidth(/*DestWidth*/ 32, /*Signed*/ true); - auto ZeroAddr = CGF.CreateMemTemp(Int32Ty, /*Name*/ ".zero.addr"); - CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0)); - llvm::Value *OutlinedFnArgs[] = {ThreadIDAddr, ZeroAddr, CapturedStruct}; - CGF.EmitCallOrInvoke(OutlinedFn, OutlinedFnArgs); +/// \brief Emits code for OpenMP 'if' clause using specified \a CodeGen +/// function.
Here is the logic: +/// if (Cond) { +/// ThenGen(); +/// } else { +/// ElseGen(); +/// } +static void emitOMPIfClause(CodeGenFunction &CGF, const Expr *Cond, + const RegionCodeGenTy &ThenGen, + const RegionCodeGenTy &ElseGen) { + CodeGenFunction::LexicalScope ConditionScope(CGF, Cond->getSourceRange()); + + // If the condition constant folds and can be elided, try to avoid emitting + // the condition and the dead arm of the if/else. + bool CondConstant; + if (CGF.ConstantFoldsToSimpleInteger(Cond, CondConstant)) { + CodeGenFunction::RunCleanupsScope Scope(CGF); + if (CondConstant) { + ThenGen(CGF); + } else { + ElseGen(CGF); + } + return; + } + + // Otherwise, the condition did not fold, or we couldn't elide it. Just + // emit the conditional branch. + auto ThenBlock = CGF.createBasicBlock("omp_if.then"); + auto ElseBlock = CGF.createBasicBlock("omp_if.else"); + auto ContBlock = CGF.createBasicBlock("omp_if.end"); + CGF.EmitBranchOnBoolExpr(Cond, ThenBlock, ElseBlock, /*TrueCount=*/0); - // __kmpc_end_serialized_parallel(&Loc, GTid); - llvm::Value *EndSerArgs[] = {EmitOpenMPUpdateLocation(CGF, Loc), ThreadID}; - RTLFn = CreateRuntimeFunction(OMPRTL__kmpc_end_serialized_parallel); - CGF.EmitRuntimeCall(RTLFn, EndSerArgs); + // Emit the 'then' code. + CGF.EmitBlock(ThenBlock); + { + CodeGenFunction::RunCleanupsScope ThenScope(CGF); + ThenGen(CGF); + } + CGF.EmitBranch(ContBlock); + // Emit the 'else' code if present. + { + // There is no need to emit line number for unconditional branch. + auto NL = ApplyDebugLocation::CreateEmpty(CGF); + CGF.EmitBlock(ElseBlock); + } + { + CodeGenFunction::RunCleanupsScope ThenScope(CGF); + ElseGen(CGF); + } + { + // There is no need to emit line number for unconditional branch. + auto NL = ApplyDebugLocation::CreateEmpty(CGF); + CGF.EmitBranch(ContBlock); + } + // Emit the continuation block for code after the if. 
+ CGF.EmitBlock(ContBlock, /*IsFinished=*/true); +} + +void CGOpenMPRuntime::emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc, + llvm::Value *OutlinedFn, + llvm::Value *CapturedStruct, + const Expr *IfCond) { + auto *RTLoc = emitUpdateLocation(CGF, Loc); + auto &&ThenGen = + [this, OutlinedFn, CapturedStruct, RTLoc](CodeGenFunction &CGF) { + // Build call __kmpc_fork_call(loc, 1, microtask, + // captured_struct/*context*/) + llvm::Value *Args[] = { + RTLoc, + CGF.Builder.getInt32( + 1), // Number of arguments after 'microtask' argument + // (there is only one additional argument - 'context') + CGF.Builder.CreateBitCast(OutlinedFn, getKmpc_MicroPointerTy()), + CGF.EmitCastToVoidPtr(CapturedStruct)}; + auto RTLFn = createRuntimeFunction(OMPRTL__kmpc_fork_call); + CGF.EmitRuntimeCall(RTLFn, Args); + }; + auto &&ElseGen = [this, OutlinedFn, CapturedStruct, RTLoc, Loc]( + CodeGenFunction &CGF) { + auto ThreadID = getThreadID(CGF, Loc); + // Build calls: + // __kmpc_serialized_parallel(&Loc, GTid); + llvm::Value *Args[] = {RTLoc, ThreadID}; + CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_serialized_parallel), + Args); + + // OutlinedFn(>id, &zero, CapturedStruct); + auto ThreadIDAddr = emitThreadIDAddress(CGF, Loc); + auto Int32Ty = CGF.getContext().getIntTypeForBitwidth(/*DestWidth*/ 32, + /*Signed*/ true); + auto ZeroAddr = CGF.CreateMemTemp(Int32Ty, /*Name*/ ".zero.addr"); + CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0)); + llvm::Value *OutlinedFnArgs[] = {ThreadIDAddr, ZeroAddr, CapturedStruct}; + CGF.EmitCallOrInvoke(OutlinedFn, OutlinedFnArgs); + + // __kmpc_end_serialized_parallel(&Loc, GTid); + llvm::Value *EndArgs[] = {emitUpdateLocation(CGF, Loc), ThreadID}; + CGF.EmitRuntimeCall( + createRuntimeFunction(OMPRTL__kmpc_end_serialized_parallel), EndArgs); + }; + if (IfCond) { + emitOMPIfClause(CGF, IfCond, ThenGen, ElseGen); + } else { + CodeGenFunction::RunCleanupsScope Scope(CGF); + ThenGen(CGF); + } } // If we're inside an (outlined) parallel region, use the region info's @@ -676,13 +1111,14 @@ void CGOpenMPRuntime::EmitOMPSerialCall(CodeGenFunction &CGF, // regular serial code region, get thread ID by calling kmp_int32 // kmpc_global_thread_num(ident_t *loc), stash this thread ID in a temporary and // return the address of that temp. 
-llvm::Value *CGOpenMPRuntime::EmitThreadIDAddress(CodeGenFunction &CGF, +llvm::Value *CGOpenMPRuntime::emitThreadIDAddress(CodeGenFunction &CGF, SourceLocation Loc) { if (auto OMPRegionInfo = dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) - return CGF.EmitLoadOfLValue(OMPRegionInfo->getThreadIDVariableLValue(CGF), - SourceLocation()).getScalarVal(); - auto ThreadID = GetOpenMPThreadID(CGF, Loc); + if (OMPRegionInfo->getThreadIDVariable()) + return OMPRegionInfo->getThreadIDVariableLValue(CGF).getAddress(); + + auto ThreadID = getThreadID(CGF, Loc); auto Int32Ty = CGF.getContext().getIntTypeForBitwidth(/*DestWidth*/ 32, /*Signed*/ true); auto ThreadIDTemp = CGF.CreateMemTemp(Int32Ty, /*Name*/ ".threadid_temp."); @@ -693,7 +1129,7 @@ llvm::Value *CGOpenMPRuntime::EmitThreadIDAddress(CodeGenFunction &CGF, } llvm::Constant * -CGOpenMPRuntime::GetOrCreateInternalVariable(llvm::Type *Ty, +CGOpenMPRuntime::getOrCreateInternalVariable(llvm::Type *Ty, const llvm::Twine &Name) { SmallString<256> Buffer; llvm::raw_svector_ostream Out(Buffer); @@ -712,31 +1148,51 @@ CGOpenMPRuntime::GetOrCreateInternalVariable(llvm::Type *Ty, Elem.first()); } -llvm::Value *CGOpenMPRuntime::GetCriticalRegionLock(StringRef CriticalName) { +llvm::Value *CGOpenMPRuntime::getCriticalRegionLock(StringRef CriticalName) { llvm::Twine Name(".gomp_critical_user_", CriticalName); - return GetOrCreateInternalVariable(KmpCriticalNameTy, Name.concat(".var")); + return getOrCreateInternalVariable(KmpCriticalNameTy, Name.concat(".var")); } -void CGOpenMPRuntime::EmitOMPCriticalRegion( - CodeGenFunction &CGF, StringRef CriticalName, - const std::function<void()> &CriticalOpGen, SourceLocation Loc) { - auto RegionLock = GetCriticalRegionLock(CriticalName); +namespace { +template <size_t N> class CallEndCleanup : public EHScopeStack::Cleanup { + llvm::Value *Callee; + llvm::Value *Args[N]; + +public: + CallEndCleanup(llvm::Value *Callee, ArrayRef<llvm::Value *> CleanupArgs) + : Callee(Callee) { + assert(CleanupArgs.size() == N); + std::copy(CleanupArgs.begin(), CleanupArgs.end(), std::begin(Args)); + } + void Emit(CodeGenFunction &CGF, Flags /*flags*/) override { + CGF.EmitRuntimeCall(Callee, Args); + } +}; +} // namespace + +void CGOpenMPRuntime::emitCriticalRegion(CodeGenFunction &CGF, + StringRef CriticalName, + const RegionCodeGenTy &CriticalOpGen, + SourceLocation Loc) { // __kmpc_critical(ident_t *, gtid, Lock); // CriticalOpGen(); // __kmpc_end_critical(ident_t *, gtid, Lock); // Prepare arguments and build a call to __kmpc_critical - llvm::Value *Args[] = {EmitOpenMPUpdateLocation(CGF, Loc), - GetOpenMPThreadID(CGF, Loc), RegionLock}; - auto RTLFn = CreateRuntimeFunction(OMPRTL__kmpc_critical); - CGF.EmitRuntimeCall(RTLFn, Args); - CriticalOpGen(); - // Build a call to __kmpc_end_critical - RTLFn = CreateRuntimeFunction(OMPRTL__kmpc_end_critical); - CGF.EmitRuntimeCall(RTLFn, Args); -} - -static void EmitOMPIfStmt(CodeGenFunction &CGF, llvm::Value *IfCond, - const std::function<void()> &BodyOpGen) { + { + CodeGenFunction::RunCleanupsScope Scope(CGF); + llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc), + getCriticalRegionLock(CriticalName)}; + CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_critical), Args); + // Build a call to __kmpc_end_critical + CGF.EHStack.pushCleanup<CallEndCleanup<std::extent<decltype(Args)>::value>>( + NormalAndEHCleanup, createRuntimeFunction(OMPRTL__kmpc_end_critical), + llvm::makeArrayRef(Args)); + emitInlinedDirective(CGF, CriticalOpGen); + } +} + 
+static void emitIfStmt(CodeGenFunction &CGF, llvm::Value *IfCond, + const RegionCodeGenTy &BodyOpGen) { llvm::Value *CallBool = CGF.EmitScalarConversion( IfCond, CGF.getContext().getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/true), @@ -747,61 +1203,234 @@ static void EmitOMPIfStmt(CodeGenFunction &CGF, llvm::Value *IfCond, // Generate the branch (If-stmt) CGF.Builder.CreateCondBr(CallBool, ThenBlock, ContBlock); CGF.EmitBlock(ThenBlock); - BodyOpGen(); + CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, BodyOpGen); // Emit the rest of bblocks/branches CGF.EmitBranch(ContBlock); CGF.EmitBlock(ContBlock, true); } -void CGOpenMPRuntime::EmitOMPMasterRegion( - CodeGenFunction &CGF, const std::function<void()> &MasterOpGen, - SourceLocation Loc) { +void CGOpenMPRuntime::emitMasterRegion(CodeGenFunction &CGF, + const RegionCodeGenTy &MasterOpGen, + SourceLocation Loc) { // if(__kmpc_master(ident_t *, gtid)) { // MasterOpGen(); // __kmpc_end_master(ident_t *, gtid); // } // Prepare arguments and build a call to __kmpc_master - llvm::Value *Args[] = {EmitOpenMPUpdateLocation(CGF, Loc), - GetOpenMPThreadID(CGF, Loc)}; - auto RTLFn = CreateRuntimeFunction(OMPRTL__kmpc_master); - auto *IsMaster = CGF.EmitRuntimeCall(RTLFn, Args); - EmitOMPIfStmt(CGF, IsMaster, [&]() -> void { - MasterOpGen(); - // Build a call to __kmpc_end_master. - // OpenMP [1.2.2 OpenMP Language Terminology] - // For C/C++, an executable statement, possibly compound, with a single - // entry at the top and a single exit at the bottom, or an OpenMP construct. - // * Access to the structured block must not be the result of a branch. - // * The point of exit cannot be a branch out of the structured block. - // * The point of entry must not be a call to setjmp(). - // * longjmp() and throw() must not violate the entry/exit criteria. - // * An expression statement, iteration statement, selection statement, or - // try block is considered to be a structured block if the corresponding - // compound statement obtained by enclosing it in { and } would be a - // structured block. - // It is analyzed in Sema, so we can just call __kmpc_end_master() on - // fallthrough rather than pushing a normal cleanup for it. 
- RTLFn = CreateRuntimeFunction(OMPRTL__kmpc_end_master); - CGF.EmitRuntimeCall(RTLFn, Args); + llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)}; + auto *IsMaster = + CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_master), Args); + typedef CallEndCleanup<std::extent<decltype(Args)>::value> + MasterCallEndCleanup; + emitIfStmt(CGF, IsMaster, [&](CodeGenFunction &CGF) -> void { + CodeGenFunction::RunCleanupsScope Scope(CGF); + CGF.EHStack.pushCleanup<MasterCallEndCleanup>( + NormalAndEHCleanup, createRuntimeFunction(OMPRTL__kmpc_end_master), + llvm::makeArrayRef(Args)); + MasterOpGen(CGF); }); } -void CGOpenMPRuntime::EmitOMPBarrierCall(CodeGenFunction &CGF, - SourceLocation Loc, bool IsExplicit) { +void CGOpenMPRuntime::emitTaskyieldCall(CodeGenFunction &CGF, + SourceLocation Loc) { + // Build call __kmpc_omp_taskyield(loc, thread_id, 0); + llvm::Value *Args[] = { + emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc), + llvm::ConstantInt::get(CGM.IntTy, /*V=*/0, /*isSigned=*/true)}; + CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_omp_taskyield), Args); +} + +static llvm::Value *emitCopyprivateCopyFunction( + CodeGenModule &CGM, llvm::Type *ArgsType, + ArrayRef<const Expr *> CopyprivateVars, ArrayRef<const Expr *> DestExprs, + ArrayRef<const Expr *> SrcExprs, ArrayRef<const Expr *> AssignmentOps) { + auto &C = CGM.getContext(); + // void copy_func(void *LHSArg, void *RHSArg); + FunctionArgList Args; + ImplicitParamDecl LHSArg(C, /*DC=*/nullptr, SourceLocation(), /*Id=*/nullptr, + C.VoidPtrTy); + ImplicitParamDecl RHSArg(C, /*DC=*/nullptr, SourceLocation(), /*Id=*/nullptr, + C.VoidPtrTy); + Args.push_back(&LHSArg); + Args.push_back(&RHSArg); + FunctionType::ExtInfo EI; + auto &CGFI = CGM.getTypes().arrangeFreeFunctionDeclaration( + C.VoidTy, Args, EI, /*isVariadic=*/false); + auto *Fn = llvm::Function::Create( + CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage, + ".omp.copyprivate.copy_func", &CGM.getModule()); + CGM.SetLLVMFunctionAttributes(/*D=*/nullptr, CGFI, Fn); + CodeGenFunction CGF(CGM); + CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args); + // Dest = (void*[n])(LHSArg); + // Src = (void*[n])(RHSArg); + auto *LHS = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( + CGF.Builder.CreateAlignedLoad(CGF.GetAddrOfLocalVar(&LHSArg), + CGF.PointerAlignInBytes), + ArgsType); + auto *RHS = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( + CGF.Builder.CreateAlignedLoad(CGF.GetAddrOfLocalVar(&RHSArg), + CGF.PointerAlignInBytes), + ArgsType); + // *(Type0*)Dst[0] = *(Type0*)Src[0]; + // *(Type1*)Dst[1] = *(Type1*)Src[1]; + // ... 
+ // *(Typen*)Dst[n] = *(Typen*)Src[n]; + for (unsigned I = 0, E = AssignmentOps.size(); I < E; ++I) { + auto *DestAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( + CGF.Builder.CreateAlignedLoad( + CGF.Builder.CreateStructGEP(nullptr, LHS, I), + CGM.PointerAlignInBytes), + CGF.ConvertTypeForMem(C.getPointerType(SrcExprs[I]->getType()))); + auto *SrcAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( + CGF.Builder.CreateAlignedLoad( + CGF.Builder.CreateStructGEP(nullptr, RHS, I), + CGM.PointerAlignInBytes), + CGF.ConvertTypeForMem(C.getPointerType(SrcExprs[I]->getType()))); + auto *VD = cast<DeclRefExpr>(CopyprivateVars[I])->getDecl(); + QualType Type = VD->getType(); + CGF.EmitOMPCopy(CGF, Type, DestAddr, SrcAddr, + cast<VarDecl>(cast<DeclRefExpr>(DestExprs[I])->getDecl()), + cast<VarDecl>(cast<DeclRefExpr>(SrcExprs[I])->getDecl()), + AssignmentOps[I]); + } + CGF.FinishFunction(); + return Fn; +} + +void CGOpenMPRuntime::emitSingleRegion(CodeGenFunction &CGF, + const RegionCodeGenTy &SingleOpGen, + SourceLocation Loc, + ArrayRef<const Expr *> CopyprivateVars, + ArrayRef<const Expr *> SrcExprs, + ArrayRef<const Expr *> DstExprs, + ArrayRef<const Expr *> AssignmentOps) { + assert(CopyprivateVars.size() == SrcExprs.size() && + CopyprivateVars.size() == DstExprs.size() && + CopyprivateVars.size() == AssignmentOps.size()); + auto &C = CGM.getContext(); + // int32 did_it = 0; + // if(__kmpc_single(ident_t *, gtid)) { + // SingleOpGen(); + // __kmpc_end_single(ident_t *, gtid); + // did_it = 1; + // } + // call __kmpc_copyprivate(ident_t *, gtid, <buf_size>, <copyprivate list>, + // <copy_func>, did_it); + + llvm::AllocaInst *DidIt = nullptr; + if (!CopyprivateVars.empty()) { + // int32 did_it = 0; + auto KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1); + DidIt = CGF.CreateMemTemp(KmpInt32Ty, ".omp.copyprivate.did_it"); + CGF.Builder.CreateAlignedStore(CGF.Builder.getInt32(0), DidIt, + DidIt->getAlignment()); + } + // Prepare arguments and build a call to __kmpc_single + llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)}; + auto *IsSingle = + CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_single), Args); + typedef CallEndCleanup<std::extent<decltype(Args)>::value> + SingleCallEndCleanup; + emitIfStmt(CGF, IsSingle, [&](CodeGenFunction &CGF) -> void { + CodeGenFunction::RunCleanupsScope Scope(CGF); + CGF.EHStack.pushCleanup<SingleCallEndCleanup>( + NormalAndEHCleanup, createRuntimeFunction(OMPRTL__kmpc_end_single), + llvm::makeArrayRef(Args)); + SingleOpGen(CGF); + if (DidIt) { + // did_it = 1; + CGF.Builder.CreateAlignedStore(CGF.Builder.getInt32(1), DidIt, + DidIt->getAlignment()); + } + }); + // call __kmpc_copyprivate(ident_t *, gtid, <buf_size>, <copyprivate list>, + // <copy_func>, did_it); + if (DidIt) { + llvm::APInt ArraySize(/*unsigned int numBits=*/32, CopyprivateVars.size()); + auto CopyprivateArrayTy = + C.getConstantArrayType(C.VoidPtrTy, ArraySize, ArrayType::Normal, + /*IndexTypeQuals=*/0); + // Create a list of all private variables for copyprivate. 
+ auto *CopyprivateList = + CGF.CreateMemTemp(CopyprivateArrayTy, ".omp.copyprivate.cpr_list"); + for (unsigned I = 0, E = CopyprivateVars.size(); I < E; ++I) { + auto *Elem = CGF.Builder.CreateStructGEP( + CopyprivateList->getAllocatedType(), CopyprivateList, I); + CGF.Builder.CreateAlignedStore( + CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( + CGF.EmitLValue(CopyprivateVars[I]).getAddress(), CGF.VoidPtrTy), + Elem, CGM.PointerAlignInBytes); + } + // Build function that copies private values from single region to all other + // threads in the corresponding parallel region. + auto *CpyFn = emitCopyprivateCopyFunction( + CGM, CGF.ConvertTypeForMem(CopyprivateArrayTy)->getPointerTo(), + CopyprivateVars, SrcExprs, DstExprs, AssignmentOps); + auto *BufSize = llvm::ConstantInt::get( + CGM.SizeTy, C.getTypeSizeInChars(CopyprivateArrayTy).getQuantity()); + auto *CL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(CopyprivateList, + CGF.VoidPtrTy); + auto *DidItVal = + CGF.Builder.CreateAlignedLoad(DidIt, CGF.PointerAlignInBytes); + llvm::Value *Args[] = { + emitUpdateLocation(CGF, Loc), // ident_t *<loc> + getThreadID(CGF, Loc), // i32 <gtid> + BufSize, // size_t <buf_size> + CL, // void *<copyprivate list> + CpyFn, // void (*) (void *, void *) <copy_func> + DidItVal // i32 did_it + }; + CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_copyprivate), Args); + } +} + +void CGOpenMPRuntime::emitOrderedRegion(CodeGenFunction &CGF, + const RegionCodeGenTy &OrderedOpGen, + SourceLocation Loc) { + // __kmpc_ordered(ident_t *, gtid); + // OrderedOpGen(); + // __kmpc_end_ordered(ident_t *, gtid); + // Prepare arguments and build a call to __kmpc_ordered + { + CodeGenFunction::RunCleanupsScope Scope(CGF); + llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)}; + CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_ordered), Args); + // Build a call to __kmpc_end_ordered + CGF.EHStack.pushCleanup<CallEndCleanup<std::extent<decltype(Args)>::value>>( + NormalAndEHCleanup, createRuntimeFunction(OMPRTL__kmpc_end_ordered), + llvm::makeArrayRef(Args)); + emitInlinedDirective(CGF, OrderedOpGen); + } +} + +void CGOpenMPRuntime::emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc, + OpenMPDirectiveKind Kind) { // Build call __kmpc_cancel_barrier(loc, thread_id); - auto Flags = static_cast<OpenMPLocationFlags>( - OMP_IDENT_KMPC | - (IsExplicit ? OMP_IDENT_BARRIER_EXPL : OMP_IDENT_BARRIER_IMPL)); + OpenMPLocationFlags Flags = OMP_IDENT_KMPC; + if (Kind == OMPD_for) { + Flags = + static_cast<OpenMPLocationFlags>(Flags | OMP_IDENT_BARRIER_IMPL_FOR); + } else if (Kind == OMPD_sections) { + Flags = static_cast<OpenMPLocationFlags>(Flags | + OMP_IDENT_BARRIER_IMPL_SECTIONS); + } else if (Kind == OMPD_single) { + Flags = + static_cast<OpenMPLocationFlags>(Flags | OMP_IDENT_BARRIER_IMPL_SINGLE); + } else if (Kind == OMPD_barrier) { + Flags = static_cast<OpenMPLocationFlags>(Flags | OMP_IDENT_BARRIER_EXPL); + } else { + Flags = static_cast<OpenMPLocationFlags>(Flags | OMP_IDENT_BARRIER_IMPL); + } // Build call __kmpc_cancel_barrier(loc, thread_id); // Replace __kmpc_barrier() function by __kmpc_cancel_barrier() because this // one provides the same functionality and adds initial support for // cancellation constructs introduced in OpenMP 4.0. __kmpc_cancel_barrier() // is provided default by the runtime library so it safe to make such // replacement. 
- llvm::Value *Args[] = {EmitOpenMPUpdateLocation(CGF, Loc, Flags), - GetOpenMPThreadID(CGF, Loc)}; - auto RTLFn = CreateRuntimeFunction(OMPRTL__kmpc_cancel_barrier); - CGF.EmitRuntimeCall(RTLFn, Args); + llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc, Flags), + getThreadID(CGF, Loc)}; + CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_cancel_barrier), Args); } /// \brief Schedule types for 'omp for' loops (these enumerators are taken from @@ -817,106 +1446,1017 @@ enum OpenMPSchedType { OMP_sch_auto = 38, /// \brief Lower bound for 'ordered' versions. OMP_ord_lower = 64, - /// \brief Lower bound for 'nomerge' versions. - OMP_nm_lower = 160, + OMP_ord_static_chunked = 65, + OMP_ord_static = 66, + OMP_ord_dynamic_chunked = 67, + OMP_ord_guided_chunked = 68, + OMP_ord_runtime = 69, + OMP_ord_auto = 70, + OMP_sch_default = OMP_sch_static, }; /// \brief Map the OpenMP loop schedule to the runtime enumeration. static OpenMPSchedType getRuntimeSchedule(OpenMPScheduleClauseKind ScheduleKind, - bool Chunked) { + bool Chunked, bool Ordered) { switch (ScheduleKind) { case OMPC_SCHEDULE_static: - return Chunked ? OMP_sch_static_chunked : OMP_sch_static; + return Chunked ? (Ordered ? OMP_ord_static_chunked : OMP_sch_static_chunked) + : (Ordered ? OMP_ord_static : OMP_sch_static); case OMPC_SCHEDULE_dynamic: - return OMP_sch_dynamic_chunked; + return Ordered ? OMP_ord_dynamic_chunked : OMP_sch_dynamic_chunked; case OMPC_SCHEDULE_guided: - return OMP_sch_guided_chunked; - case OMPC_SCHEDULE_auto: - return OMP_sch_auto; + return Ordered ? OMP_ord_guided_chunked : OMP_sch_guided_chunked; case OMPC_SCHEDULE_runtime: - return OMP_sch_runtime; + return Ordered ? OMP_ord_runtime : OMP_sch_runtime; + case OMPC_SCHEDULE_auto: + return Ordered ? OMP_ord_auto : OMP_sch_auto; case OMPC_SCHEDULE_unknown: assert(!Chunked && "chunk was specified but schedule kind not known"); - return OMP_sch_static; + return Ordered ? OMP_ord_static : OMP_sch_static; } llvm_unreachable("Unexpected runtime schedule"); } bool CGOpenMPRuntime::isStaticNonchunked(OpenMPScheduleClauseKind ScheduleKind, bool Chunked) const { - auto Schedule = getRuntimeSchedule(ScheduleKind, Chunked); + auto Schedule = getRuntimeSchedule(ScheduleKind, Chunked, /*Ordered=*/false); return Schedule == OMP_sch_static; } -void CGOpenMPRuntime::EmitOMPForInit(CodeGenFunction &CGF, SourceLocation Loc, - OpenMPScheduleClauseKind ScheduleKind, - unsigned IVSize, bool IVSigned, - llvm::Value *IL, llvm::Value *LB, - llvm::Value *UB, llvm::Value *ST, - llvm::Value *Chunk) { - OpenMPSchedType Schedule = getRuntimeSchedule(ScheduleKind, Chunk != nullptr); - // Call __kmpc_for_static_init( - // ident_t *loc, kmp_int32 tid, kmp_int32 schedtype, - // kmp_int32 *p_lastiter, kmp_int[32|64] *p_lower, - // kmp_int[32|64] *p_upper, kmp_int[32|64] *p_stride, - // kmp_int[32|64] incr, kmp_int[32|64] chunk); - // TODO: Implement dynamic schedule. - - // If the Chunk was not specified in the clause - use default value 1. 
- if (Chunk == nullptr) - Chunk = CGF.Builder.getIntN(IVSize, /*C*/ 1); +bool CGOpenMPRuntime::isDynamic(OpenMPScheduleClauseKind ScheduleKind) const { + auto Schedule = + getRuntimeSchedule(ScheduleKind, /*Chunked=*/false, /*Ordered=*/false); + assert(Schedule != OMP_sch_static_chunked && "cannot be chunked here"); + return Schedule != OMP_sch_static; +} + +void CGOpenMPRuntime::emitForInit(CodeGenFunction &CGF, SourceLocation Loc, + OpenMPScheduleClauseKind ScheduleKind, + unsigned IVSize, bool IVSigned, bool Ordered, + llvm::Value *IL, llvm::Value *LB, + llvm::Value *UB, llvm::Value *ST, + llvm::Value *Chunk) { + OpenMPSchedType Schedule = + getRuntimeSchedule(ScheduleKind, Chunk != nullptr, Ordered); + if (Ordered || + (Schedule != OMP_sch_static && Schedule != OMP_sch_static_chunked && + Schedule != OMP_ord_static && Schedule != OMP_ord_static_chunked)) { + // Call __kmpc_dispatch_init( + // ident_t *loc, kmp_int32 tid, kmp_int32 schedule, + // kmp_int[32|64] lower, kmp_int[32|64] upper, + // kmp_int[32|64] stride, kmp_int[32|64] chunk); + + // If the Chunk was not specified in the clause - use default value 1. + if (Chunk == nullptr) + Chunk = CGF.Builder.getIntN(IVSize, 1); + llvm::Value *Args[] = { emitUpdateLocation(CGF, Loc, OMP_IDENT_KMPC), + getThreadID(CGF, Loc), + CGF.Builder.getInt32(Schedule), // Schedule type + CGF.Builder.getIntN(IVSize, 0), // Lower + UB, // Upper + CGF.Builder.getIntN(IVSize, 1), // Stride + Chunk // Chunk + }; + CGF.EmitRuntimeCall(createDispatchInitFunction(IVSize, IVSigned), Args); + } else { + // Call __kmpc_for_static_init( + // ident_t *loc, kmp_int32 tid, kmp_int32 schedtype, + // kmp_int32 *p_lastiter, kmp_int[32|64] *p_lower, + // kmp_int[32|64] *p_upper, kmp_int[32|64] *p_stride, + // kmp_int[32|64] incr, kmp_int[32|64] chunk); + if (Chunk == nullptr) { + assert((Schedule == OMP_sch_static || Schedule == OMP_ord_static) && + "expected static non-chunked schedule"); + // If the Chunk was not specified in the clause - use default value 1. 
+ Chunk = CGF.Builder.getIntN(IVSize, 1); + } else + assert((Schedule == OMP_sch_static_chunked || + Schedule == OMP_ord_static_chunked) && + "expected static chunked schedule"); + llvm::Value *Args[] = { emitUpdateLocation(CGF, Loc, OMP_IDENT_KMPC), + getThreadID(CGF, Loc), + CGF.Builder.getInt32(Schedule), // Schedule type + IL, // &isLastIter + LB, // &LB + UB, // &UB + ST, // &Stride + CGF.Builder.getIntN(IVSize, 1), // Incr + Chunk // Chunk + }; + CGF.EmitRuntimeCall(createForStaticInitFunction(IVSize, IVSigned), Args); + } +} +void CGOpenMPRuntime::emitForStaticFinish(CodeGenFunction &CGF, + SourceLocation Loc) { + // Call __kmpc_for_static_fini(ident_t *loc, kmp_int32 tid); + llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc, OMP_IDENT_KMPC), + getThreadID(CGF, Loc)}; + CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_for_static_fini), + Args); +} + +void CGOpenMPRuntime::emitForOrderedIterationEnd(CodeGenFunction &CGF, + SourceLocation Loc, + unsigned IVSize, + bool IVSigned) { + // Call __kmpc_for_dynamic_fini_(4|8)[u](ident_t *loc, kmp_int32 tid); + llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc, OMP_IDENT_KMPC), + getThreadID(CGF, Loc)}; + CGF.EmitRuntimeCall(createDispatchFiniFunction(IVSize, IVSigned), Args); +} + +llvm::Value *CGOpenMPRuntime::emitForNext(CodeGenFunction &CGF, + SourceLocation Loc, unsigned IVSize, + bool IVSigned, llvm::Value *IL, + llvm::Value *LB, llvm::Value *UB, + llvm::Value *ST) { + // Call __kmpc_dispatch_next( + // ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter, + // kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper, + // kmp_int[32|64] *p_stride); llvm::Value *Args[] = { - EmitOpenMPUpdateLocation(CGF, Loc, OMP_IDENT_KMPC), - GetOpenMPThreadID(CGF, Loc), - CGF.Builder.getInt32(Schedule), // Schedule type - IL, // &isLastIter - LB, // &LB - UB, // &UB - ST, // &Stride - CGF.Builder.getIntN(IVSize, 1), // Incr - Chunk // Chunk + emitUpdateLocation(CGF, Loc, OMP_IDENT_KMPC), getThreadID(CGF, Loc), + IL, // &isLastIter + LB, // &Lower + UB, // &Upper + ST // &Stride }; - assert((IVSize == 32 || IVSize == 64) && - "Index size is not compatible with the omp runtime"); - auto F = IVSize == 32 ? (IVSigned ? OMPRTL__kmpc_for_static_init_4 - : OMPRTL__kmpc_for_static_init_4u) - : (IVSigned ? 
OMPRTL__kmpc_for_static_init_8 - : OMPRTL__kmpc_for_static_init_8u); - auto RTLFn = CreateRuntimeFunction(F); - CGF.EmitRuntimeCall(RTLFn, Args); -} - -void CGOpenMPRuntime::EmitOMPForFinish(CodeGenFunction &CGF, SourceLocation Loc, - OpenMPScheduleClauseKind ScheduleKind) { - assert((ScheduleKind == OMPC_SCHEDULE_static || - ScheduleKind == OMPC_SCHEDULE_unknown) && - "Non-static schedule kinds are not yet implemented"); - // Call __kmpc_for_static_fini(ident_t *loc, kmp_int32 tid); - llvm::Value *Args[] = {EmitOpenMPUpdateLocation(CGF, Loc, OMP_IDENT_KMPC), - GetOpenMPThreadID(CGF, Loc)}; - auto RTLFn = CreateRuntimeFunction(OMPRTL__kmpc_for_static_fini); - CGF.EmitRuntimeCall(RTLFn, Args); + llvm::Value *Call = + CGF.EmitRuntimeCall(createDispatchNextFunction(IVSize, IVSigned), Args); + return CGF.EmitScalarConversion( + Call, CGF.getContext().getIntTypeForBitwidth(32, /* Signed */ true), + CGF.getContext().BoolTy); } -void CGOpenMPRuntime::EmitOMPNumThreadsClause(CodeGenFunction &CGF, - llvm::Value *NumThreads, - SourceLocation Loc) { +void CGOpenMPRuntime::emitNumThreadsClause(CodeGenFunction &CGF, + llvm::Value *NumThreads, + SourceLocation Loc) { // Build call __kmpc_push_num_threads(&loc, global_tid, num_threads) llvm::Value *Args[] = { - EmitOpenMPUpdateLocation(CGF, Loc), GetOpenMPThreadID(CGF, Loc), + emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc), CGF.Builder.CreateIntCast(NumThreads, CGF.Int32Ty, /*isSigned*/ true)}; - llvm::Constant *RTLFn = CreateRuntimeFunction(OMPRTL__kmpc_push_num_threads); - CGF.EmitRuntimeCall(RTLFn, Args); + CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_push_num_threads), + Args); +} + +void CGOpenMPRuntime::emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *>, + SourceLocation Loc) { + // Build call void __kmpc_flush(ident_t *loc) + CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_flush), + emitUpdateLocation(CGF, Loc)); +} + +namespace { +/// \brief Indexes of fields for type kmp_task_t. +enum KmpTaskTFields { + /// \brief List of shared variables. + KmpTaskTShareds, + /// \brief Task routine. + KmpTaskTRoutine, + /// \brief Partition id for the untied tasks. + KmpTaskTPartId, + /// \brief Function with call of destructors for private variables. + KmpTaskTDestructors, +}; +} // namespace + +void CGOpenMPRuntime::emitKmpRoutineEntryT(QualType KmpInt32Ty) { + if (!KmpRoutineEntryPtrTy) { + // Build typedef kmp_int32 (* kmp_routine_entry_t)(kmp_int32, void *); type. 
+ auto &C = CGM.getContext(); + QualType KmpRoutineEntryTyArgs[] = {KmpInt32Ty, C.VoidPtrTy}; + FunctionProtoType::ExtProtoInfo EPI; + KmpRoutineEntryPtrQTy = C.getPointerType( + C.getFunctionType(KmpInt32Ty, KmpRoutineEntryTyArgs, EPI)); + KmpRoutineEntryPtrTy = CGM.getTypes().ConvertType(KmpRoutineEntryPtrQTy); + } +} + +static void addFieldToRecordDecl(ASTContext &C, DeclContext *DC, + QualType FieldTy) { + auto *Field = FieldDecl::Create( + C, DC, SourceLocation(), SourceLocation(), /*Id=*/nullptr, FieldTy, + C.getTrivialTypeSourceInfo(FieldTy, SourceLocation()), + /*BW=*/nullptr, /*Mutable=*/false, /*InitStyle=*/ICIS_NoInit); + Field->setAccess(AS_public); + DC->addDecl(Field); +} + +namespace { +struct PrivateHelpersTy { + PrivateHelpersTy(const VarDecl *Original, const VarDecl *PrivateCopy, + const VarDecl *PrivateElemInit) + : Original(Original), PrivateCopy(PrivateCopy), + PrivateElemInit(PrivateElemInit) {} + const VarDecl *Original; + const VarDecl *PrivateCopy; + const VarDecl *PrivateElemInit; +}; +typedef std::pair<CharUnits /*Align*/, PrivateHelpersTy> PrivateDataTy; +} // namespace + +static RecordDecl * +createPrivatesRecordDecl(CodeGenModule &CGM, + const ArrayRef<PrivateDataTy> Privates) { + if (!Privates.empty()) { + auto &C = CGM.getContext(); + // Build struct .kmp_privates_t. { + // /* private vars */ + // }; + auto *RD = C.buildImplicitRecord(".kmp_privates.t"); + RD->startDefinition(); + for (auto &&Pair : Privates) { + auto Type = Pair.second.Original->getType(); + Type = Type.getNonReferenceType(); + addFieldToRecordDecl(C, RD, Type); + } + RD->completeDefinition(); + return RD; + } + return nullptr; } -void CGOpenMPRuntime::EmitOMPFlush(CodeGenFunction &CGF, ArrayRef<const Expr *>, - SourceLocation Loc) { - // Build call void __kmpc_flush(ident_t *loc, ...) - // FIXME: List of variables is ignored by libiomp5 runtime, no need to - // generate it, just request full memory fence. - llvm::Value *Args[] = {EmitOpenMPUpdateLocation(CGF, Loc), - llvm::ConstantInt::get(CGM.Int32Ty, 0)}; - auto *RTLFn = CreateRuntimeFunction(OMPRTL__kmpc_flush); - CGF.EmitRuntimeCall(RTLFn, Args); +static RecordDecl * +createKmpTaskTRecordDecl(CodeGenModule &CGM, QualType KmpInt32Ty, + QualType KmpRoutineEntryPointerQTy) { + auto &C = CGM.getContext(); + // Build struct kmp_task_t { + // void * shareds; + // kmp_routine_entry_t routine; + // kmp_int32 part_id; + // kmp_routine_entry_t destructors; + // }; + auto *RD = C.buildImplicitRecord("kmp_task_t"); + RD->startDefinition(); + addFieldToRecordDecl(C, RD, C.VoidPtrTy); + addFieldToRecordDecl(C, RD, KmpRoutineEntryPointerQTy); + addFieldToRecordDecl(C, RD, KmpInt32Ty); + addFieldToRecordDecl(C, RD, KmpRoutineEntryPointerQTy); + RD->completeDefinition(); + return RD; +} + +static RecordDecl * +createKmpTaskTWithPrivatesRecordDecl(CodeGenModule &CGM, QualType KmpTaskTQTy, + const ArrayRef<PrivateDataTy> Privates) { + auto &C = CGM.getContext(); + // Build struct kmp_task_t_with_privates { + // kmp_task_t task_data; + // .kmp_privates_t. privates; + // }; + auto *RD = C.buildImplicitRecord("kmp_task_t_with_privates"); + RD->startDefinition(); + addFieldToRecordDecl(C, RD, KmpTaskTQTy); + if (auto *PrivateRD = createPrivatesRecordDecl(CGM, Privates)) { + addFieldToRecordDecl(C, RD, C.getRecordType(PrivateRD)); + } + RD->completeDefinition(); + return RD; } + +/// \brief Emit a proxy function which accepts kmp_task_t as the second +/// argument. 
+/// \code +/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) { +/// TaskFunction(gtid, tt->part_id, &tt->privates, task_privates_map, +/// tt->shareds); +/// return 0; +/// } +/// \endcode +static llvm::Value * +emitProxyTaskFunction(CodeGenModule &CGM, SourceLocation Loc, + QualType KmpInt32Ty, QualType KmpTaskTWithPrivatesPtrQTy, + QualType KmpTaskTWithPrivatesQTy, QualType KmpTaskTQTy, + QualType SharedsPtrTy, llvm::Value *TaskFunction, + llvm::Value *TaskPrivatesMap) { + auto &C = CGM.getContext(); + FunctionArgList Args; + ImplicitParamDecl GtidArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, KmpInt32Ty); + ImplicitParamDecl TaskTypeArg(C, /*DC=*/nullptr, Loc, + /*Id=*/nullptr, KmpTaskTWithPrivatesPtrQTy); + Args.push_back(&GtidArg); + Args.push_back(&TaskTypeArg); + FunctionType::ExtInfo Info; + auto &TaskEntryFnInfo = + CGM.getTypes().arrangeFreeFunctionDeclaration(KmpInt32Ty, Args, Info, + /*isVariadic=*/false); + auto *TaskEntryTy = CGM.getTypes().GetFunctionType(TaskEntryFnInfo); + auto *TaskEntry = + llvm::Function::Create(TaskEntryTy, llvm::GlobalValue::InternalLinkage, + ".omp_task_entry.", &CGM.getModule()); + CGM.SetLLVMFunctionAttributes(/*D=*/nullptr, TaskEntryFnInfo, TaskEntry); + CodeGenFunction CGF(CGM); + CGF.disableDebugInfo(); + CGF.StartFunction(GlobalDecl(), KmpInt32Ty, TaskEntry, TaskEntryFnInfo, Args); + + // TaskFunction(gtid, tt->task_data.part_id, &tt->privates, task_privates_map, + // tt->task_data.shareds); + auto *GtidParam = CGF.EmitLoadOfScalar( + CGF.GetAddrOfLocalVar(&GtidArg), /*Volatile=*/false, + C.getTypeAlignInChars(KmpInt32Ty).getQuantity(), KmpInt32Ty, Loc); + auto *TaskTypeArgAddr = CGF.Builder.CreateAlignedLoad( + CGF.GetAddrOfLocalVar(&TaskTypeArg), CGM.PointerAlignInBytes); + LValue TDBase = + CGF.MakeNaturalAlignAddrLValue(TaskTypeArgAddr, KmpTaskTWithPrivatesQTy); + auto *KmpTaskTWithPrivatesQTyRD = + cast<RecordDecl>(KmpTaskTWithPrivatesQTy->getAsTagDecl()); + LValue Base = + CGF.EmitLValueForField(TDBase, *KmpTaskTWithPrivatesQTyRD->field_begin()); + auto *KmpTaskTQTyRD = cast<RecordDecl>(KmpTaskTQTy->getAsTagDecl()); + auto PartIdFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTPartId); + auto PartIdLVal = CGF.EmitLValueForField(Base, *PartIdFI); + auto *PartidParam = CGF.EmitLoadOfLValue(PartIdLVal, Loc).getScalarVal(); + + auto SharedsFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTShareds); + auto SharedsLVal = CGF.EmitLValueForField(Base, *SharedsFI); + auto *SharedsParam = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( + CGF.EmitLoadOfLValue(SharedsLVal, Loc).getScalarVal(), + CGF.ConvertTypeForMem(SharedsPtrTy)); + + auto PrivatesFI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin(), 1); + llvm::Value *PrivatesParam; + if (PrivatesFI != KmpTaskTWithPrivatesQTyRD->field_end()) { + auto PrivatesLVal = CGF.EmitLValueForField(TDBase, *PrivatesFI); + PrivatesParam = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( + PrivatesLVal.getAddress(), CGF.VoidPtrTy); + } else { + PrivatesParam = llvm::ConstantPointerNull::get(CGF.VoidPtrTy); + } + + llvm::Value *CallArgs[] = {GtidParam, PartidParam, PrivatesParam, + TaskPrivatesMap, SharedsParam}; + CGF.EmitCallOrInvoke(TaskFunction, CallArgs); + CGF.EmitStoreThroughLValue( + RValue::get(CGF.Builder.getInt32(/*C=*/0)), + CGF.MakeNaturalAlignAddrLValue(CGF.ReturnValue, KmpInt32Ty)); + CGF.FinishFunction(); + return TaskEntry; +} + +static llvm::Value *emitDestructorsFunction(CodeGenModule &CGM, + SourceLocation Loc, + QualType KmpInt32Ty, + QualType 
KmpTaskTWithPrivatesPtrQTy, + QualType KmpTaskTWithPrivatesQTy) { + auto &C = CGM.getContext(); + FunctionArgList Args; + ImplicitParamDecl GtidArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, KmpInt32Ty); + ImplicitParamDecl TaskTypeArg(C, /*DC=*/nullptr, Loc, + /*Id=*/nullptr, KmpTaskTWithPrivatesPtrQTy); + Args.push_back(&GtidArg); + Args.push_back(&TaskTypeArg); + FunctionType::ExtInfo Info; + auto &DestructorFnInfo = + CGM.getTypes().arrangeFreeFunctionDeclaration(KmpInt32Ty, Args, Info, + /*isVariadic=*/false); + auto *DestructorFnTy = CGM.getTypes().GetFunctionType(DestructorFnInfo); + auto *DestructorFn = + llvm::Function::Create(DestructorFnTy, llvm::GlobalValue::InternalLinkage, + ".omp_task_destructor.", &CGM.getModule()); + CGM.SetLLVMFunctionAttributes(/*D=*/nullptr, DestructorFnInfo, DestructorFn); + CodeGenFunction CGF(CGM); + CGF.disableDebugInfo(); + CGF.StartFunction(GlobalDecl(), KmpInt32Ty, DestructorFn, DestructorFnInfo, + Args); + + auto *TaskTypeArgAddr = CGF.Builder.CreateAlignedLoad( + CGF.GetAddrOfLocalVar(&TaskTypeArg), CGM.PointerAlignInBytes); + LValue Base = + CGF.MakeNaturalAlignAddrLValue(TaskTypeArgAddr, KmpTaskTWithPrivatesQTy); + auto *KmpTaskTWithPrivatesQTyRD = + cast<RecordDecl>(KmpTaskTWithPrivatesQTy->getAsTagDecl()); + auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin()); + Base = CGF.EmitLValueForField(Base, *FI); + for (auto *Field : + cast<RecordDecl>(FI->getType()->getAsTagDecl())->fields()) { + if (auto DtorKind = Field->getType().isDestructedType()) { + auto FieldLValue = CGF.EmitLValueForField(Base, Field); + CGF.pushDestroy(DtorKind, FieldLValue.getAddress(), Field->getType()); + } + } + CGF.FinishFunction(); + return DestructorFn; +} + +/// \brief Emit a privates mapping function for correct handling of private and +/// firstprivate variables. +/// \code +/// void .omp_task_privates_map.(const .privates. 
*noalias privs, <ty1> +/// **noalias priv1,..., <tyn> **noalias privn) { +/// *priv1 = &.privates.priv1; +/// ...; +/// *privn = &.privates.privn; +/// } +/// \endcode +static llvm::Value * +emitTaskPrivateMappingFunction(CodeGenModule &CGM, SourceLocation Loc, + const ArrayRef<const Expr *> PrivateVars, + const ArrayRef<const Expr *> FirstprivateVars, + QualType PrivatesQTy, + const ArrayRef<PrivateDataTy> Privates) { + auto &C = CGM.getContext(); + FunctionArgList Args; + ImplicitParamDecl TaskPrivatesArg( + C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, + C.getPointerType(PrivatesQTy).withConst().withRestrict()); + Args.push_back(&TaskPrivatesArg); + llvm::DenseMap<const VarDecl *, unsigned> PrivateVarsPos; + unsigned Counter = 1; + for (auto *E: PrivateVars) { + Args.push_back(ImplicitParamDecl::Create( + C, /*DC=*/nullptr, Loc, + /*Id=*/nullptr, C.getPointerType(C.getPointerType(E->getType())) + .withConst() + .withRestrict())); + auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); + PrivateVarsPos[VD] = Counter; + ++Counter; + } + for (auto *E : FirstprivateVars) { + Args.push_back(ImplicitParamDecl::Create( + C, /*DC=*/nullptr, Loc, + /*Id=*/nullptr, C.getPointerType(C.getPointerType(E->getType())) + .withConst() + .withRestrict())); + auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); + PrivateVarsPos[VD] = Counter; + ++Counter; + } + FunctionType::ExtInfo Info; + auto &TaskPrivatesMapFnInfo = + CGM.getTypes().arrangeFreeFunctionDeclaration(C.VoidTy, Args, Info, + /*isVariadic=*/false); + auto *TaskPrivatesMapTy = + CGM.getTypes().GetFunctionType(TaskPrivatesMapFnInfo); + auto *TaskPrivatesMap = llvm::Function::Create( + TaskPrivatesMapTy, llvm::GlobalValue::InternalLinkage, + ".omp_task_privates_map.", &CGM.getModule()); + CGM.SetLLVMFunctionAttributes(/*D=*/nullptr, TaskPrivatesMapFnInfo, + TaskPrivatesMap); + TaskPrivatesMap->addFnAttr(llvm::Attribute::AlwaysInline); + CodeGenFunction CGF(CGM); + CGF.disableDebugInfo(); + CGF.StartFunction(GlobalDecl(), C.VoidTy, TaskPrivatesMap, + TaskPrivatesMapFnInfo, Args); + + // *privi = &.privates.privi; + auto *TaskPrivatesArgAddr = CGF.Builder.CreateAlignedLoad( + CGF.GetAddrOfLocalVar(&TaskPrivatesArg), CGM.PointerAlignInBytes); + LValue Base = + CGF.MakeNaturalAlignAddrLValue(TaskPrivatesArgAddr, PrivatesQTy); + auto *PrivatesQTyRD = cast<RecordDecl>(PrivatesQTy->getAsTagDecl()); + Counter = 0; + for (auto *Field : PrivatesQTyRD->fields()) { + auto FieldLVal = CGF.EmitLValueForField(Base, Field); + auto *VD = Args[PrivateVarsPos[Privates[Counter].second.Original]]; + auto RefLVal = CGF.MakeNaturalAlignAddrLValue(CGF.GetAddrOfLocalVar(VD), + VD->getType()); + auto RefLoadRVal = CGF.EmitLoadOfLValue(RefLVal, Loc); + CGF.EmitStoreOfScalar( + FieldLVal.getAddress(), + CGF.MakeNaturalAlignAddrLValue(RefLoadRVal.getScalarVal(), + RefLVal.getType()->getPointeeType())); + ++Counter; + } + CGF.FinishFunction(); + return TaskPrivatesMap; +} + +static int array_pod_sort_comparator(const PrivateDataTy *P1, + const PrivateDataTy *P2) { + return P1->first < P2->first ? 1 : (P2->first < P1->first ? 
-1 : 0); +} + +void CGOpenMPRuntime::emitTaskCall( + CodeGenFunction &CGF, SourceLocation Loc, const OMPExecutableDirective &D, + bool Tied, llvm::PointerIntPair<llvm::Value *, 1, bool> Final, + llvm::Value *TaskFunction, QualType SharedsTy, llvm::Value *Shareds, + const Expr *IfCond, const ArrayRef<const Expr *> PrivateVars, + const ArrayRef<const Expr *> PrivateCopies, + const ArrayRef<const Expr *> FirstprivateVars, + const ArrayRef<const Expr *> FirstprivateCopies, + const ArrayRef<const Expr *> FirstprivateInits) { + auto &C = CGM.getContext(); + llvm::SmallVector<PrivateDataTy, 8> Privates; + // Aggregate privates and sort them by the alignment. + auto I = PrivateCopies.begin(); + for (auto *E : PrivateVars) { + auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); + Privates.push_back(std::make_pair( + C.getTypeAlignInChars(VD->getType()), + PrivateHelpersTy(VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()), + /*PrivateElemInit=*/nullptr))); + ++I; + } + I = FirstprivateCopies.begin(); + auto IElemInitRef = FirstprivateInits.begin(); + for (auto *E : FirstprivateVars) { + auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); + Privates.push_back(std::make_pair( + C.getTypeAlignInChars(VD->getType()), + PrivateHelpersTy( + VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()), + cast<VarDecl>(cast<DeclRefExpr>(*IElemInitRef)->getDecl())))); + ++I, ++IElemInitRef; + } + llvm::array_pod_sort(Privates.begin(), Privates.end(), + array_pod_sort_comparator); + auto KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1); + // Build type kmp_routine_entry_t (if not built yet). + emitKmpRoutineEntryT(KmpInt32Ty); + // Build type kmp_task_t (if not built yet). + if (KmpTaskTQTy.isNull()) { + KmpTaskTQTy = C.getRecordType( + createKmpTaskTRecordDecl(CGM, KmpInt32Ty, KmpRoutineEntryPtrQTy)); + } + auto *KmpTaskTQTyRD = cast<RecordDecl>(KmpTaskTQTy->getAsTagDecl()); + // Build particular struct kmp_task_t for the given task. + auto *KmpTaskTWithPrivatesQTyRD = + createKmpTaskTWithPrivatesRecordDecl(CGM, KmpTaskTQTy, Privates); + auto KmpTaskTWithPrivatesQTy = C.getRecordType(KmpTaskTWithPrivatesQTyRD); + QualType KmpTaskTWithPrivatesPtrQTy = + C.getPointerType(KmpTaskTWithPrivatesQTy); + auto *KmpTaskTWithPrivatesTy = CGF.ConvertType(KmpTaskTWithPrivatesQTy); + auto *KmpTaskTWithPrivatesPtrTy = KmpTaskTWithPrivatesTy->getPointerTo(); + auto KmpTaskTWithPrivatesTySize = + CGM.getSize(C.getTypeSizeInChars(KmpTaskTWithPrivatesQTy)); + QualType SharedsPtrTy = C.getPointerType(SharedsTy); + + // Emit initial values for private copies (if any). 
+ llvm::Value *TaskPrivatesMap = nullptr; + auto *TaskPrivatesMapTy = + std::next(cast<llvm::Function>(TaskFunction)->getArgumentList().begin(), + 3) + ->getType(); + if (!Privates.empty()) { + auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin()); + TaskPrivatesMap = emitTaskPrivateMappingFunction( + CGM, Loc, PrivateVars, FirstprivateVars, FI->getType(), Privates); + TaskPrivatesMap = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( + TaskPrivatesMap, TaskPrivatesMapTy); + } else { + TaskPrivatesMap = llvm::ConstantPointerNull::get( + cast<llvm::PointerType>(TaskPrivatesMapTy)); + } + // Build a proxy function kmp_int32 .omp_task_entry.(kmp_int32 gtid, + // kmp_task_t *tt); + auto *TaskEntry = emitProxyTaskFunction( + CGM, Loc, KmpInt32Ty, KmpTaskTWithPrivatesPtrQTy, KmpTaskTWithPrivatesQTy, + KmpTaskTQTy, SharedsPtrTy, TaskFunction, TaskPrivatesMap); + + // Build call kmp_task_t * __kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid, + // kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds, + // kmp_routine_entry_t *task_entry); + // Task flags. Format is taken from + // http://llvm.org/svn/llvm-project/openmp/trunk/runtime/src/kmp.h, + // description of kmp_tasking_flags struct. + const unsigned TiedFlag = 0x1; + const unsigned FinalFlag = 0x2; + unsigned Flags = Tied ? TiedFlag : 0; + auto *TaskFlags = + Final.getPointer() + ? CGF.Builder.CreateSelect(Final.getPointer(), + CGF.Builder.getInt32(FinalFlag), + CGF.Builder.getInt32(/*C=*/0)) + : CGF.Builder.getInt32(Final.getInt() ? FinalFlag : 0); + TaskFlags = CGF.Builder.CreateOr(TaskFlags, CGF.Builder.getInt32(Flags)); + auto SharedsSize = C.getTypeSizeInChars(SharedsTy); + llvm::Value *AllocArgs[] = { + emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc), TaskFlags, + KmpTaskTWithPrivatesTySize, CGM.getSize(SharedsSize), + CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(TaskEntry, + KmpRoutineEntryPtrTy)}; + auto *NewTask = CGF.EmitRuntimeCall( + createRuntimeFunction(OMPRTL__kmpc_omp_task_alloc), AllocArgs); + auto *NewTaskNewTaskTTy = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( + NewTask, KmpTaskTWithPrivatesPtrTy); + LValue Base = CGF.MakeNaturalAlignAddrLValue(NewTaskNewTaskTTy, + KmpTaskTWithPrivatesQTy); + LValue TDBase = + CGF.EmitLValueForField(Base, *KmpTaskTWithPrivatesQTyRD->field_begin()); + // Fill the data in the resulting kmp_task_t record. + // Copy shareds if there are any. + llvm::Value *KmpTaskSharedsPtr = nullptr; + if (!SharedsTy->getAsStructureType()->getDecl()->field_empty()) { + KmpTaskSharedsPtr = CGF.EmitLoadOfScalar( + CGF.EmitLValueForField( + TDBase, *std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTShareds)), + Loc); + CGF.EmitAggregateCopy(KmpTaskSharedsPtr, Shareds, SharedsTy); + } + // Emit initial values for private copies (if any). 
+ bool NeedsCleanup = false; + if (!Privates.empty()) { + auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin()); + auto PrivatesBase = CGF.EmitLValueForField(Base, *FI); + FI = cast<RecordDecl>(FI->getType()->getAsTagDecl())->field_begin(); + LValue SharedsBase; + if (!FirstprivateVars.empty()) { + SharedsBase = CGF.MakeNaturalAlignAddrLValue( + CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( + KmpTaskSharedsPtr, CGF.ConvertTypeForMem(SharedsPtrTy)), + SharedsTy); + } + CodeGenFunction::CGCapturedStmtInfo CapturesInfo( + cast<CapturedStmt>(*D.getAssociatedStmt())); + for (auto &&Pair : Privates) { + auto *VD = Pair.second.PrivateCopy; + auto *Init = VD->getAnyInitializer(); + LValue PrivateLValue = CGF.EmitLValueForField(PrivatesBase, *FI); + if (Init) { + if (auto *Elem = Pair.second.PrivateElemInit) { + auto *OriginalVD = Pair.second.Original; + auto *SharedField = CapturesInfo.lookup(OriginalVD); + auto SharedRefLValue = + CGF.EmitLValueForField(SharedsBase, SharedField); + QualType Type = OriginalVD->getType(); + if (Type->isArrayType()) { + // Initialize firstprivate array. + if (!isa<CXXConstructExpr>(Init) || + CGF.isTrivialInitializer(Init)) { + // Perform simple memcpy. + CGF.EmitAggregateAssign(PrivateLValue.getAddress(), + SharedRefLValue.getAddress(), Type); + } else { + // Initialize firstprivate array using element-by-element + // intialization. + CGF.EmitOMPAggregateAssign( + PrivateLValue.getAddress(), SharedRefLValue.getAddress(), + Type, [&CGF, Elem, Init, &CapturesInfo]( + llvm::Value *DestElement, llvm::Value *SrcElement) { + // Clean up any temporaries needed by the initialization. + CodeGenFunction::OMPPrivateScope InitScope(CGF); + InitScope.addPrivate(Elem, [SrcElement]() -> llvm::Value *{ + return SrcElement; + }); + (void)InitScope.Privatize(); + // Emit initialization for single element. + auto *OldCapturedStmtInfo = CGF.CapturedStmtInfo; + CGF.CapturedStmtInfo = &CapturesInfo; + CGF.EmitAnyExprToMem(Init, DestElement, + Init->getType().getQualifiers(), + /*IsInitializer=*/false); + CGF.CapturedStmtInfo = OldCapturedStmtInfo; + }); + } + } else { + CodeGenFunction::OMPPrivateScope InitScope(CGF); + InitScope.addPrivate(Elem, [SharedRefLValue]() -> llvm::Value *{ + return SharedRefLValue.getAddress(); + }); + (void)InitScope.Privatize(); + auto *OldCapturedStmtInfo = CGF.CapturedStmtInfo; + CGF.CapturedStmtInfo = &CapturesInfo; + CGF.EmitExprAsInit(Init, VD, PrivateLValue, + /*capturedByInit=*/false); + CGF.CapturedStmtInfo = OldCapturedStmtInfo; + } + } else { + CGF.EmitExprAsInit(Init, VD, PrivateLValue, /*capturedByInit=*/false); + } + } + NeedsCleanup = NeedsCleanup || FI->getType().isDestructedType(); + ++FI; + } + } + // Provide pointer to function with destructors for privates. + llvm::Value *DestructorFn = + NeedsCleanup ? emitDestructorsFunction(CGM, Loc, KmpInt32Ty, + KmpTaskTWithPrivatesPtrQTy, + KmpTaskTWithPrivatesQTy) + : llvm::ConstantPointerNull::get( + cast<llvm::PointerType>(KmpRoutineEntryPtrTy)); + LValue Destructor = CGF.EmitLValueForField( + TDBase, *std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTDestructors)); + CGF.EmitStoreOfScalar(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( + DestructorFn, KmpRoutineEntryPtrTy), + Destructor); + // NOTE: routine and part_id fields are intialized by __kmpc_omp_task_alloc() + // libcall. 
+ // Build kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t + // *new_task); + auto *ThreadID = getThreadID(CGF, Loc); + llvm::Value *TaskArgs[] = {emitUpdateLocation(CGF, Loc), ThreadID, NewTask}; + auto &&ThenCodeGen = [this, &TaskArgs](CodeGenFunction &CGF) { + // TODO: add check for untied tasks. + CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_omp_task), TaskArgs); + }; + typedef CallEndCleanup<std::extent<decltype(TaskArgs)>::value> + IfCallEndCleanup; + auto &&ElseCodeGen = + [this, &TaskArgs, ThreadID, NewTaskNewTaskTTy, TaskEntry]( + CodeGenFunction &CGF) { + CodeGenFunction::RunCleanupsScope LocalScope(CGF); + CGF.EmitRuntimeCall( + createRuntimeFunction(OMPRTL__kmpc_omp_task_begin_if0), TaskArgs); + // Build void __kmpc_omp_task_complete_if0(ident_t *, kmp_int32 gtid, + // kmp_task_t *new_task); + CGF.EHStack.pushCleanup<IfCallEndCleanup>( + NormalAndEHCleanup, + createRuntimeFunction(OMPRTL__kmpc_omp_task_complete_if0), + llvm::makeArrayRef(TaskArgs)); + + // Call proxy_task_entry(gtid, new_task); + llvm::Value *OutlinedFnArgs[] = {ThreadID, NewTaskNewTaskTTy}; + CGF.EmitCallOrInvoke(TaskEntry, OutlinedFnArgs); + }; + if (IfCond) { + emitOMPIfClause(CGF, IfCond, ThenCodeGen, ElseCodeGen); + } else { + CodeGenFunction::RunCleanupsScope Scope(CGF); + ThenCodeGen(CGF); + } +} + +static llvm::Value *emitReductionFunction(CodeGenModule &CGM, + llvm::Type *ArgsType, + ArrayRef<const Expr *> LHSExprs, + ArrayRef<const Expr *> RHSExprs, + ArrayRef<const Expr *> ReductionOps) { + auto &C = CGM.getContext(); + + // void reduction_func(void *LHSArg, void *RHSArg); + FunctionArgList Args; + ImplicitParamDecl LHSArg(C, /*DC=*/nullptr, SourceLocation(), /*Id=*/nullptr, + C.VoidPtrTy); + ImplicitParamDecl RHSArg(C, /*DC=*/nullptr, SourceLocation(), /*Id=*/nullptr, + C.VoidPtrTy); + Args.push_back(&LHSArg); + Args.push_back(&RHSArg); + FunctionType::ExtInfo EI; + auto &CGFI = CGM.getTypes().arrangeFreeFunctionDeclaration( + C.VoidTy, Args, EI, /*isVariadic=*/false); + auto *Fn = llvm::Function::Create( + CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage, + ".omp.reduction.reduction_func", &CGM.getModule()); + CGM.SetLLVMFunctionAttributes(/*D=*/nullptr, CGFI, Fn); + CodeGenFunction CGF(CGM); + CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args); + + // Dst = (void*[n])(LHSArg); + // Src = (void*[n])(RHSArg); + auto *LHS = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( + CGF.Builder.CreateAlignedLoad(CGF.GetAddrOfLocalVar(&LHSArg), + CGF.PointerAlignInBytes), + ArgsType); + auto *RHS = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( + CGF.Builder.CreateAlignedLoad(CGF.GetAddrOfLocalVar(&RHSArg), + CGF.PointerAlignInBytes), + ArgsType); + + // ... + // *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]); + // ... 
+ CodeGenFunction::OMPPrivateScope Scope(CGF); + for (unsigned I = 0, E = ReductionOps.size(); I < E; ++I) { + Scope.addPrivate( + cast<VarDecl>(cast<DeclRefExpr>(RHSExprs[I])->getDecl()), + [&]() -> llvm::Value *{ + return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( + CGF.Builder.CreateAlignedLoad( + CGF.Builder.CreateStructGEP(/*Ty=*/nullptr, RHS, I), + CGM.PointerAlignInBytes), + CGF.ConvertTypeForMem(C.getPointerType(RHSExprs[I]->getType()))); + }); + Scope.addPrivate( + cast<VarDecl>(cast<DeclRefExpr>(LHSExprs[I])->getDecl()), + [&]() -> llvm::Value *{ + return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( + CGF.Builder.CreateAlignedLoad( + CGF.Builder.CreateStructGEP(/*Ty=*/nullptr, LHS, I), + CGM.PointerAlignInBytes), + CGF.ConvertTypeForMem(C.getPointerType(LHSExprs[I]->getType()))); + }); + } + Scope.Privatize(); + for (auto *E : ReductionOps) { + CGF.EmitIgnoredExpr(E); + } + Scope.ForceCleanup(); + CGF.FinishFunction(); + return Fn; +} + +void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc, + ArrayRef<const Expr *> LHSExprs, + ArrayRef<const Expr *> RHSExprs, + ArrayRef<const Expr *> ReductionOps, + bool WithNowait) { + // Next code should be emitted for reduction: + // + // static kmp_critical_name lock = { 0 }; + // + // void reduce_func(void *lhs[<n>], void *rhs[<n>]) { + // *(Type0*)lhs[0] = ReductionOperation0(*(Type0*)lhs[0], *(Type0*)rhs[0]); + // ... + // *(Type<n>-1*)lhs[<n>-1] = ReductionOperation<n>-1(*(Type<n>-1*)lhs[<n>-1], + // *(Type<n>-1*)rhs[<n>-1]); + // } + // + // ... + // void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n>-1]}; + // switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList), + // RedList, reduce_func, &<lock>)) { + // case 1: + // ... + // <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]); + // ... + // __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>); + // break; + // case 2: + // ... + // Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i])); + // ... + // [__kmpc_end_reduce(<loc>, <gtid>, &<lock>);] + // break; + // default:; + // } + + auto &C = CGM.getContext(); + + // 1. Build a list of reduction variables. + // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]}; + llvm::APInt ArraySize(/*unsigned int numBits=*/32, RHSExprs.size()); + QualType ReductionArrayTy = + C.getConstantArrayType(C.VoidPtrTy, ArraySize, ArrayType::Normal, + /*IndexTypeQuals=*/0); + auto *ReductionList = + CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list"); + for (unsigned I = 0, E = RHSExprs.size(); I < E; ++I) { + auto *Elem = CGF.Builder.CreateStructGEP(/*Ty=*/nullptr, ReductionList, I); + CGF.Builder.CreateAlignedStore( + CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( + CGF.EmitLValue(RHSExprs[I]).getAddress(), CGF.VoidPtrTy), + Elem, CGM.PointerAlignInBytes); + } + + // 2. Emit reduce_func(). + auto *ReductionFn = emitReductionFunction( + CGM, CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo(), LHSExprs, + RHSExprs, ReductionOps); + + // 3. Create static kmp_critical_name lock = { 0 }; + auto *Lock = getCriticalRegionLock(".reduction"); + + // 4. 
Build res = __kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList), + // RedList, reduce_func, &<lock>); + auto *IdentTLoc = emitUpdateLocation( + CGF, Loc, + static_cast<OpenMPLocationFlags>(OMP_IDENT_KMPC | OMP_ATOMIC_REDUCE)); + auto *ThreadId = getThreadID(CGF, Loc); + auto *ReductionArrayTySize = llvm::ConstantInt::get( + CGM.SizeTy, C.getTypeSizeInChars(ReductionArrayTy).getQuantity()); + auto *RL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(ReductionList, + CGF.VoidPtrTy); + llvm::Value *Args[] = { + IdentTLoc, // ident_t *<loc> + ThreadId, // i32 <gtid> + CGF.Builder.getInt32(RHSExprs.size()), // i32 <n> + ReductionArrayTySize, // size_type sizeof(RedList) + RL, // void *RedList + ReductionFn, // void (*) (void *, void *) <reduce_func> + Lock // kmp_critical_name *&<lock> + }; + auto Res = CGF.EmitRuntimeCall( + createRuntimeFunction(WithNowait ? OMPRTL__kmpc_reduce_nowait + : OMPRTL__kmpc_reduce), + Args); + + // 5. Build switch(res) + auto *DefaultBB = CGF.createBasicBlock(".omp.reduction.default"); + auto *SwInst = CGF.Builder.CreateSwitch(Res, DefaultBB, /*NumCases=*/2); + + // 6. Build case 1: + // ... + // <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]); + // ... + // __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>); + // break; + auto *Case1BB = CGF.createBasicBlock(".omp.reduction.case1"); + SwInst->addCase(CGF.Builder.getInt32(1), Case1BB); + CGF.EmitBlock(Case1BB); + + { + CodeGenFunction::RunCleanupsScope Scope(CGF); + // Add emission of __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>); + llvm::Value *EndArgs[] = { + IdentTLoc, // ident_t *<loc> + ThreadId, // i32 <gtid> + Lock // kmp_critical_name *&<lock> + }; + CGF.EHStack + .pushCleanup<CallEndCleanup<std::extent<decltype(EndArgs)>::value>>( + NormalAndEHCleanup, + createRuntimeFunction(WithNowait ? OMPRTL__kmpc_end_reduce_nowait + : OMPRTL__kmpc_end_reduce), + llvm::makeArrayRef(EndArgs)); + for (auto *E : ReductionOps) { + CGF.EmitIgnoredExpr(E); + } + } + + CGF.EmitBranch(DefaultBB); + + // 7. Build case 2: + // ... + // Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i])); + // ... + // break; + auto *Case2BB = CGF.createBasicBlock(".omp.reduction.case2"); + SwInst->addCase(CGF.Builder.getInt32(2), Case2BB); + CGF.EmitBlock(Case2BB); + + { + CodeGenFunction::RunCleanupsScope Scope(CGF); + if (!WithNowait) { + // Add emission of __kmpc_end_reduce(<loc>, <gtid>, &<lock>); + llvm::Value *EndArgs[] = { + IdentTLoc, // ident_t *<loc> + ThreadId, // i32 <gtid> + Lock // kmp_critical_name *&<lock> + }; + CGF.EHStack + .pushCleanup<CallEndCleanup<std::extent<decltype(EndArgs)>::value>>( + NormalAndEHCleanup, + createRuntimeFunction(OMPRTL__kmpc_end_reduce), + llvm::makeArrayRef(EndArgs)); + } + auto I = LHSExprs.begin(); + for (auto *E : ReductionOps) { + const Expr *XExpr = nullptr; + const Expr *EExpr = nullptr; + const Expr *UpExpr = nullptr; + BinaryOperatorKind BO = BO_Comma; + if (auto *BO = dyn_cast<BinaryOperator>(E)) { + if (BO->getOpcode() == BO_Assign) { + XExpr = BO->getLHS(); + UpExpr = BO->getRHS(); + } + } + // Try to emit update expression as a simple atomic. + auto *RHSExpr = UpExpr; + if (RHSExpr) { + // Analyze RHS part of the whole expression. + if (auto *ACO = dyn_cast<AbstractConditionalOperator>( + RHSExpr->IgnoreParenImpCasts())) { + // If this is a conditional operator, analyze its condition for + // min/max reduction operator. 
+          RHSExpr = ACO->getCond();
+        }
+        if (auto *BORHS =
+                dyn_cast<BinaryOperator>(RHSExpr->IgnoreParenImpCasts())) {
+          EExpr = BORHS->getRHS();
+          BO = BORHS->getOpcode();
+        }
+      }
+      if (XExpr) {
+        auto *VD = cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl());
+        LValue X = CGF.EmitLValue(XExpr);
+        RValue E;
+        if (EExpr)
+          E = CGF.EmitAnyExpr(EExpr);
+        CGF.EmitOMPAtomicSimpleUpdateExpr(
+            X, E, BO, /*IsXLHSInRHSPart=*/true, llvm::Monotonic, Loc,
+            [&CGF, UpExpr, VD](RValue XRValue) {
+              CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
+              PrivateScope.addPrivate(
+                  VD, [&CGF, VD, XRValue]() -> llvm::Value *{
+                    auto *LHSTemp = CGF.CreateMemTemp(VD->getType());
+                    CGF.EmitStoreThroughLValue(
+                        XRValue,
+                        CGF.MakeNaturalAlignAddrLValue(LHSTemp, VD->getType()));
+                    return LHSTemp;
+                  });
+              (void)PrivateScope.Privatize();
+              return CGF.EmitAnyExpr(UpExpr);
+            });
+      } else {
+        // Emit as a critical region.
+        emitCriticalRegion(CGF, ".atomic_reduction", [E](CodeGenFunction &CGF) {
+          CGF.EmitIgnoredExpr(E);
+        }, Loc);
+      }
+      ++I;
+    }
+  }
+
+  CGF.EmitBranch(DefaultBB);
+  CGF.EmitBlock(DefaultBB, /*IsFinished=*/true);
+}
+
+void CGOpenMPRuntime::emitTaskwaitCall(CodeGenFunction &CGF,
+                                       SourceLocation Loc) {
+  // Build call kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32
+  // global_tid);
+  llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
+  // Ignore return result until untied tasks are supported.
+  CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_omp_taskwait), Args);
+}
+
+void CGOpenMPRuntime::emitInlinedDirective(CodeGenFunction &CGF,
+                                           const RegionCodeGenTy &CodeGen) {
+  InlinedOpenMPRegionRAII Region(CGF, CodeGen);
+  CGF.CapturedStmtInfo->EmitBody(CGF, /*S=*/nullptr);
+}
+
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGOpenMPRuntime.h b/contrib/llvm/tools/clang/lib/CodeGen/CGOpenMPRuntime.h
index 6daf817..f5aa4a5 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGOpenMPRuntime.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGOpenMPRuntime.h
@@ -14,6 +14,7 @@
 #ifndef LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIME_H
 #define LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIME_H
 
+#include "clang/AST/Type.h"
 #include "clang/Basic/OpenMPKinds.h"
 #include "clang/Basic/SourceLocation.h"
 #include "llvm/ADT/DenseMap.h"
@@ -42,9 +43,9 @@ namespace CodeGen {
 class CodeGenFunction;
 class CodeGenModule;
 
-class CGOpenMPRuntime {
-public:
+typedef llvm::function_ref<void(CodeGenFunction &)> RegionCodeGenTy;
+class CGOpenMPRuntime {
 private:
   enum OpenMPRTLFunction {
     /// \brief Call to void __kmpc_fork_call(ident_t *loc, kmp_int32 argc,
@@ -67,11 +68,7 @@ private:
     // Call to kmp_int32 __kmpc_cancel_barrier(ident_t *loc, kmp_int32
     // global_tid);
     OMPRTL__kmpc_cancel_barrier,
-    // Calls for static scheduling 'omp for' loops.
-    OMPRTL__kmpc_for_static_init_4,
-    OMPRTL__kmpc_for_static_init_4u,
-    OMPRTL__kmpc_for_static_init_8,
-    OMPRTL__kmpc_for_static_init_8u,
+    // Call to void __kmpc_for_static_fini(ident_t *loc, kmp_int32 global_tid);
     OMPRTL__kmpc_for_static_fini,
     // Call to void __kmpc_serialized_parallel(ident_t *loc, kmp_int32
     // global_tid);
@@ -82,12 +79,58 @@ private:
     // Call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid,
    // kmp_int32 num_threads);
     OMPRTL__kmpc_push_num_threads,
-    // Call to void __kmpc_flush(ident_t *loc, ...);
+    // Call to void __kmpc_flush(ident_t *loc);
     OMPRTL__kmpc_flush,
     // Call to kmp_int32 __kmpc_master(ident_t *, kmp_int32 global_tid);
     OMPRTL__kmpc_master,
     // Call to void __kmpc_end_master(ident_t *, kmp_int32 global_tid);
     OMPRTL__kmpc_end_master,
+    // Call to kmp_int32 __kmpc_omp_taskyield(ident_t *, kmp_int32 global_tid,
+    // int end_part);
+    OMPRTL__kmpc_omp_taskyield,
+    // Call to kmp_int32 __kmpc_single(ident_t *, kmp_int32 global_tid);
+    OMPRTL__kmpc_single,
+    // Call to void __kmpc_end_single(ident_t *, kmp_int32 global_tid);
+    OMPRTL__kmpc_end_single,
+    // Call to kmp_task_t * __kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid,
+    // kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
+    // kmp_routine_entry_t *task_entry);
+    OMPRTL__kmpc_omp_task_alloc,
+    // Call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t *
+    // new_task);
+    OMPRTL__kmpc_omp_task,
+    // Call to void __kmpc_copyprivate(ident_t *loc, kmp_int32 global_tid,
+    // size_t cpy_size, void *cpy_data, void(*cpy_func)(void *, void *),
+    // kmp_int32 didit);
+    OMPRTL__kmpc_copyprivate,
+    // Call to kmp_int32 __kmpc_reduce(ident_t *loc, kmp_int32 global_tid,
+    // kmp_int32 num_vars, size_t reduce_size, void *reduce_data, void
+    // (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name *lck);
+    OMPRTL__kmpc_reduce,
+    // Call to kmp_int32 __kmpc_reduce_nowait(ident_t *loc, kmp_int32
+    // global_tid, kmp_int32 num_vars, size_t reduce_size, void *reduce_data,
+    // void (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name
+    // *lck);
+    OMPRTL__kmpc_reduce_nowait,
+    // Call to void __kmpc_end_reduce(ident_t *loc, kmp_int32 global_tid,
+    // kmp_critical_name *lck);
+    OMPRTL__kmpc_end_reduce,
+    // Call to void __kmpc_end_reduce_nowait(ident_t *loc, kmp_int32 global_tid,
+    // kmp_critical_name *lck);
+    OMPRTL__kmpc_end_reduce_nowait,
+    // Call to void __kmpc_omp_task_begin_if0(ident_t *, kmp_int32 gtid,
+    // kmp_task_t * new_task);
+    OMPRTL__kmpc_omp_task_begin_if0,
+    // Call to void __kmpc_omp_task_complete_if0(ident_t *, kmp_int32 gtid,
+    // kmp_task_t * new_task);
+    OMPRTL__kmpc_omp_task_complete_if0,
+    // Call to void __kmpc_ordered(ident_t *loc, kmp_int32 global_tid);
+    OMPRTL__kmpc_ordered,
+    // Call to void __kmpc_end_ordered(ident_t *loc, kmp_int32 global_tid);
+    OMPRTL__kmpc_end_ordered,
+    // Call to kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32
+    // global_tid);
+    OMPRTL__kmpc_omp_taskwait,
  };
 
  /// \brief Values for bit flags used in the ident_t to describe the fields.
@@ -118,7 +161,7 @@ private:
  /// \brief Map of flags and corresponding default locations.
  typedef llvm::DenseMap<unsigned, llvm::Value *> OpenMPDefaultLocMapTy;
  OpenMPDefaultLocMapTy OpenMPDefaultLocMap;
-  llvm::Value *GetOrCreateDefaultOpenMPLocation(OpenMPLocationFlags Flags);
+  llvm::Value *getOrCreateDefaultLocation(OpenMPLocationFlags Flags);
 
  /// \brief Describes ident structure that describes a source location.
  /// All descriptions are taken from
  /// http://llvm.org/svn/llvm-project/openmp/trunk/runtime/src/kmp.h
@@ -186,13 +229,28 @@ private:
  /// variables.
  llvm::StringMap<llvm::AssertingVH<llvm::Constant>, llvm::BumpPtrAllocator>
      InternalVars;
+  /// \brief Type typedef kmp_int32 (* kmp_routine_entry_t)(kmp_int32, void *);
+  llvm::Type *KmpRoutineEntryPtrTy;
+  QualType KmpRoutineEntryPtrQTy;
+  /// \brief Type typedef struct kmp_task {
+  ///    void *              shareds; /**< pointer to block of pointers to
+  ///                                      shared vars */
+  ///    kmp_routine_entry_t routine; /**< pointer to routine to call for
+  ///                                      executing task */
+  ///    kmp_int32           part_id; /**< part id for the task */
+  ///    kmp_routine_entry_t destructors; /* pointer to function to invoke
+  ///                                        destructors of firstprivate C++
+  ///                                        objects */
+  /// } kmp_task_t;
+  QualType KmpTaskTQTy;
+
+  /// \brief Build type kmp_routine_entry_t (if not built yet).
+  void emitKmpRoutineEntryT(QualType KmpInt32Ty);
 
  /// \brief Emits object of ident_t type with info for source location.
  /// \param Flags Flags for OpenMP location.
  ///
-  llvm::Value *
-  EmitOpenMPUpdateLocation(CodeGenFunction &CGF, SourceLocation Loc,
-                           OpenMPLocationFlags Flags = OMP_IDENT_KMPC);
+  llvm::Value *emitUpdateLocation(CodeGenFunction &CGF, SourceLocation Loc,
+                                  OpenMPLocationFlags Flags = OMP_IDENT_KMPC);
 
  /// \brief Returns pointer to ident_t type.
  llvm::Type *getIdentTyPointerTy();
@@ -203,7 +261,23 @@ private:
  /// \brief Returns specified OpenMP runtime function.
  /// \param Function OpenMP runtime function.
  /// \return Specified function.
-  llvm::Constant *CreateRuntimeFunction(OpenMPRTLFunction Function);
+  llvm::Constant *createRuntimeFunction(OpenMPRTLFunction Function);
+
+  /// \brief Returns __kmpc_for_static_init_* runtime function for the specified
+  /// size \a IVSize and sign \a IVSigned.
+  llvm::Constant *createForStaticInitFunction(unsigned IVSize, bool IVSigned);
+
+  /// \brief Returns __kmpc_dispatch_init_* runtime function for the specified
+  /// size \a IVSize and sign \a IVSigned.
+  llvm::Constant *createDispatchInitFunction(unsigned IVSize, bool IVSigned);
+
+  /// \brief Returns __kmpc_dispatch_next_* runtime function for the specified
+  /// size \a IVSize and sign \a IVSigned.
+  llvm::Constant *createDispatchNextFunction(unsigned IVSize, bool IVSigned);
+
+  /// \brief Returns __kmpc_dispatch_fini_* runtime function for the specified
+  /// size \a IVSize and sign \a IVSigned.
+  llvm::Constant *createDispatchFiniFunction(unsigned IVSize, bool IVSigned);
 
  /// \brief If the specified mangled name is not in the module, create and
  /// return threadprivate cache object. This object is a pointer's worth of
@@ -214,12 +288,12 @@ private:
 
  /// \brief Emits address of the word in a memory where current thread id is
  /// stored.
-  virtual llvm::Value *EmitThreadIDAddress(CodeGenFunction &CGF,
+  virtual llvm::Value *emitThreadIDAddress(CodeGenFunction &CGF,
                                            SourceLocation Loc);
 
  /// \brief Gets thread id value for the current thread.
  ///
-  llvm::Value *GetOpenMPThreadID(CodeGenFunction &CGF, SourceLocation Loc);
+  llvm::Value *getThreadID(CodeGenFunction &CGF, SourceLocation Loc);
 
  /// \brief Gets (if a variable with the given name already exists) or creates
  /// internal global variable with the specified Name. The created variable has
  /// linkage CommonLinkage.
  /// \param Ty Type of the global variable. If it already exists, the type
  /// must be the same.
  /// \param Name Name of the variable.
-  llvm::Constant *GetOrCreateInternalVariable(llvm::Type *Ty,
+  llvm::Constant *getOrCreateInternalVariable(llvm::Type *Ty,
                                               const llvm::Twine &Name);
 
  /// \brief Set of threadprivate variables with the generated initializer.
@@ -239,78 +313,105 @@ private:
  /// \param CopyCtor Pointer to a global copy function for \a VD.
  /// \param Dtor Pointer to a global destructor function for \a VD.
  /// \param Loc Location of threadprivate declaration.
-  void EmitOMPThreadPrivateVarInit(CodeGenFunction &CGF, llvm::Value *VDAddr,
-                                   llvm::Value *Ctor, llvm::Value *CopyCtor,
-                                   llvm::Value *Dtor, SourceLocation Loc);
+  void emitThreadPrivateVarInit(CodeGenFunction &CGF, llvm::Value *VDAddr,
+                                llvm::Value *Ctor, llvm::Value *CopyCtor,
+                                llvm::Value *Dtor, SourceLocation Loc);
 
  /// \brief Returns corresponding lock object for the specified critical region
  /// name. If the lock object does not exist, it is created; otherwise the
  /// reference to the existing copy is returned.
  /// \param CriticalName Name of the critical region.
  ///
-  llvm::Value *GetCriticalRegionLock(StringRef CriticalName);
+  llvm::Value *getCriticalRegionLock(StringRef CriticalName);
 
 public:
  explicit CGOpenMPRuntime(CodeGenModule &CGM);
  virtual ~CGOpenMPRuntime() {}
+  virtual void clear();
 
-  /// \brief Emits outlined function for the specified OpenMP directive \a D
-  /// (required for parallel and task directives). This outlined function has
-  /// type void(*)(kmp_int32 /*ThreadID*/, kmp_int32 /*BoundID*/, struct
-  /// context_vars*).
+  /// \brief Emits outlined function for the specified OpenMP parallel directive
+  /// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
+  /// kmp_int32 BoundID, struct context_vars*).
  /// \param D OpenMP directive.
  /// \param ThreadIDVar Variable for thread id in the current OpenMP region.
-  ///
+  /// \param CodeGen Code generation sequence for the \a D directive.
  virtual llvm::Value *
-  EmitOpenMPOutlinedFunction(const OMPExecutableDirective &D,
-                             const VarDecl *ThreadIDVar);
+  emitParallelOutlinedFunction(const OMPExecutableDirective &D,
+                               const VarDecl *ThreadIDVar,
+                               const RegionCodeGenTy &CodeGen);
 
-  /// \brief Cleans up references to the objects in finished function.
+  /// \brief Emits outlined function for the OpenMP task directive \a D. This
+  /// outlined function has type void(*)(kmp_int32 ThreadID, kmp_int32
+  /// PartID, struct context_vars*).
+  /// \param D OpenMP directive.
+  /// \param ThreadIDVar Variable for thread id in the current OpenMP region.
+  /// \param CodeGen Code generation sequence for the \a D directive.
  ///
-  void FunctionFinished(CodeGenFunction &CGF);
+  virtual llvm::Value *emitTaskOutlinedFunction(const OMPExecutableDirective &D,
+                                                const VarDecl *ThreadIDVar,
+                                                const RegionCodeGenTy &CodeGen);
 
-  /// \brief Emits code for parallel call of the \a OutlinedFn with variables
-  /// captured in a record which address is stored in \a CapturedStruct.
-  /// \param OutlinedFn Outlined function to be run in parallel threads. Type of
-  /// this function is void(*)(kmp_int32, kmp_int32, struct context_vars*).
-  /// \param CapturedStruct A pointer to the record with the references to
-  /// variables used in \a OutlinedFn function.
+  /// \brief Cleans up references to the objects in finished function.
  ///
-  virtual void EmitOMPParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
-                                   llvm::Value *OutlinedFn,
-                                   llvm::Value *CapturedStruct);
+  void functionFinished(CodeGenFunction &CGF);
 
-  /// \brief Emits code for serial call of the \a OutlinedFn with variables
-  /// captured in a record which address is stored in \a CapturedStruct.
-  /// \param OutlinedFn Outlined function to be run in serial mode.
+  /// \brief Emits code for parallel or serial call of the \a OutlinedFn with
+  /// variables captured in a record whose address is stored in \a
+  /// CapturedStruct.
+  /// \param OutlinedFn Outlined function to be run in parallel threads. Type of
+  /// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
  /// \param CapturedStruct A pointer to the record with the references to
  /// variables used in \a OutlinedFn function.
+  /// \param IfCond Condition in the associated 'if' clause, if it was
+  /// specified, nullptr otherwise.
  ///
-  virtual void EmitOMPSerialCall(CodeGenFunction &CGF, SourceLocation Loc,
-                                 llvm::Value *OutlinedFn,
-                                 llvm::Value *CapturedStruct);
+  virtual void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
+                                llvm::Value *OutlinedFn,
+                                llvm::Value *CapturedStruct,
+                                const Expr *IfCond);
 
  /// \brief Emits a critical region.
  /// \param CriticalName Name of the critical region.
  /// \param CriticalOpGen Generator for the statement associated with the given
  /// critical region.
-  virtual void EmitOMPCriticalRegion(CodeGenFunction &CGF,
-                                     StringRef CriticalName,
-                                     const std::function<void()> &CriticalOpGen,
-                                     SourceLocation Loc);
+  virtual void emitCriticalRegion(CodeGenFunction &CGF, StringRef CriticalName,
+                                  const RegionCodeGenTy &CriticalOpGen,
+                                  SourceLocation Loc);
 
  /// \brief Emits a master region.
  /// \param MasterOpGen Generator for the statement associated with the given
  /// master region.
-  virtual void EmitOMPMasterRegion(CodeGenFunction &CGF,
-                                   const std::function<void()> &MasterOpGen,
-                                   SourceLocation Loc);
+  virtual void emitMasterRegion(CodeGenFunction &CGF,
+                                const RegionCodeGenTy &MasterOpGen,
+                                SourceLocation Loc);
+
+  /// \brief Emits code for a taskyield directive.
+  virtual void emitTaskyieldCall(CodeGenFunction &CGF, SourceLocation Loc);
+
+  /// \brief Emits a single region.
+  /// \param SingleOpGen Generator for the statement associated with the given
+  /// single region.
+  virtual void emitSingleRegion(CodeGenFunction &CGF,
+                                const RegionCodeGenTy &SingleOpGen,
+                                SourceLocation Loc,
+                                ArrayRef<const Expr *> CopyprivateVars,
+                                ArrayRef<const Expr *> DestExprs,
+                                ArrayRef<const Expr *> SrcExprs,
+                                ArrayRef<const Expr *> AssignmentOps);
+
+  /// \brief Emit an ordered region.
+  /// \param OrderedOpGen Generator for the statement associated with the given
+  /// ordered region.
+  virtual void emitOrderedRegion(CodeGenFunction &CGF,
+                                 const RegionCodeGenTy &OrderedOpGen,
+                                 SourceLocation Loc);
 
-  /// \brief Emits explicit barrier for OpenMP threads.
-  /// \param IsExplicit true, if it is explicitly specified barrier.
+  /// \brief Emit an implicit/explicit barrier for OpenMP threads.
+  /// \param Kind Directive for which this implicit barrier call must be
+  /// generated. Must be OMPD_barrier for explicit barrier generation.
  ///
-  virtual void EmitOMPBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
-                                  bool IsExplicit = true);
+  virtual void emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
+                               OpenMPDirectiveKind Kind);
 
  /// \brief Check if the specified \a ScheduleKind is static non-chunked.
  /// This kind of worksharing directive is emitted without outer loop.
@@ -320,6 +421,12 @@ public:
  virtual bool isStaticNonchunked(OpenMPScheduleClauseKind ScheduleKind,
                                  bool Chunked) const;
 
+  /// \brief Check if the specified \a ScheduleKind is dynamic.
+  /// This kind of worksharing directive is emitted without outer loop.
+  /// \param ScheduleKind Schedule Kind specified in the 'schedule' clause.
+  ///
+  virtual bool isDynamic(OpenMPScheduleClauseKind ScheduleKind) const;
+
  /// \brief Call the appropriate runtime routine to initialize it before start
  /// of loop.
  ///
@@ -332,6 +439,7 @@ public:
  /// \param SchedKind Schedule kind, specified by the 'schedule' clause.
  /// \param IVSize Size of the iteration variable in bits.
  /// \param IVSigned Sign of the iteration variable.
+  /// \param Ordered true if loop is ordered, false otherwise.
  /// \param IL Address of the output variable in which the flag of the
  /// last iteration is returned.
  /// \param LB Address of the output variable in which the lower iteration
@@ -343,29 +451,58 @@ public:
  /// \param Chunk Value of the chunk for the static_chunked scheduled loop.
  /// For the default (nullptr) value, the chunk 1 will be used.
  ///
-  virtual void EmitOMPForInit(CodeGenFunction &CGF, SourceLocation Loc,
-                              OpenMPScheduleClauseKind SchedKind,
-                              unsigned IVSize, bool IVSigned, llvm::Value *IL,
-                              llvm::Value *LB, llvm::Value *UB, llvm::Value *ST,
-                              llvm::Value *Chunk = nullptr);
+  virtual void emitForInit(CodeGenFunction &CGF, SourceLocation Loc,
+                           OpenMPScheduleClauseKind SchedKind, unsigned IVSize,
+                           bool IVSigned, bool Ordered, llvm::Value *IL,
+                           llvm::Value *LB, llvm::Value *UB, llvm::Value *ST,
+                           llvm::Value *Chunk = nullptr);
+
+  /// \brief Call the appropriate runtime routine to notify that we finished
+  /// iteration of the ordered loop with the dynamic scheduling.
+  ///
+  /// \param CGF Reference to current CodeGenFunction.
+  /// \param Loc Clang source location.
+  /// \param IVSize Size of the iteration variable in bits.
+  /// \param IVSigned Sign of the iteration variable.
+  ///
+  virtual void emitForOrderedIterationEnd(CodeGenFunction &CGF,
+                                          SourceLocation Loc, unsigned IVSize,
+                                          bool IVSigned);
 
  /// \brief Call the appropriate runtime routine to notify that we finished
  /// all the work with current loop.
  ///
  /// \param CGF Reference to current CodeGenFunction.
  /// \param Loc Clang source location.
-  /// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
  ///
-  virtual void EmitOMPForFinish(CodeGenFunction &CGF, SourceLocation Loc,
-                                OpenMPScheduleClauseKind ScheduleKind);
+  virtual void emitForStaticFinish(CodeGenFunction &CGF, SourceLocation Loc);
+
+  /// Call __kmpc_dispatch_next(
+  ///          ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter,
+  ///          kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper,
+  ///          kmp_int[32|64] *p_stride);
+  /// \param IVSize Size of the iteration variable in bits.
+  /// \param IVSigned Sign of the iteration variable.
+  /// \param IL Address of the output variable in which the flag of the
+  /// last iteration is returned.
+  /// \param LB Address of the output variable in which the lower iteration
+  /// number is returned.
+  /// \param UB Address of the output variable in which the upper iteration
+  /// number is returned.
+  /// \param ST Address of the output variable in which the stride value is
+  /// returned.
+  virtual llvm::Value *emitForNext(CodeGenFunction &CGF, SourceLocation Loc,
+                                   unsigned IVSize, bool IVSigned,
+                                   llvm::Value *IL, llvm::Value *LB,
+                                   llvm::Value *UB, llvm::Value *ST);
 
  /// \brief Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32
  /// global_tid, kmp_int32 num_threads) to generate code for 'num_threads'
  /// clause.
  /// \param NumThreads An integer value of threads.
-  virtual void EmitOMPNumThreadsClause(CodeGenFunction &CGF,
-                                       llvm::Value *NumThreads,
-                                       SourceLocation Loc);
+  virtual void emitNumThreadsClause(CodeGenFunction &CGF,
+                                    llvm::Value *NumThreads,
+                                    SourceLocation Loc);
 
  /// \brief Returns address of the threadprivate variable for the current
  /// thread.
@@ -373,10 +510,10 @@ public:
  /// \param VDAddr Address of the global variable \a VD.
  /// \param Loc Location of the reference to threadprivate var.
  /// \return Address of the threadprivate variable for the current thread.
-  virtual llvm::Value *getOMPAddrOfThreadPrivate(CodeGenFunction &CGF,
-                                                 const VarDecl *VD,
-                                                 llvm::Value *VDAddr,
-                                                 SourceLocation Loc);
+  virtual llvm::Value *getAddrOfThreadPrivate(CodeGenFunction &CGF,
+                                              const VarDecl *VD,
+                                              llvm::Value *VDAddr,
+                                              SourceLocation Loc);
 
  /// \brief Emit code for initialization of a threadprivate variable. It emits
  /// a call to runtime library which adds initial value to the newly created
  /// threadprivate variable.
@@ -387,15 +524,120 @@ public:
  /// \param Loc Location of threadprivate declaration.
  /// \param PerformInit true if initialization expression is not constant.
  virtual llvm::Function *
-  EmitOMPThreadPrivateVarDefinition(const VarDecl *VD, llvm::Value *VDAddr,
-                                    SourceLocation Loc, bool PerformInit,
-                                    CodeGenFunction *CGF = nullptr);
+  emitThreadPrivateVarDefinition(const VarDecl *VD, llvm::Value *VDAddr,
+                                 SourceLocation Loc, bool PerformInit,
+                                 CodeGenFunction *CGF = nullptr);
 
  /// \brief Emit flush of the variables specified in 'omp flush' directive.
  /// \param Vars List of variables to flush.
-  virtual void EmitOMPFlush(CodeGenFunction &CGF, ArrayRef<const Expr *> Vars,
-                            SourceLocation Loc);
+  virtual void emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *> Vars,
+                         SourceLocation Loc);
+
+  /// \brief Emit task region for the task directive. The task region is
+  /// emitted in several steps:
+  /// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
+  /// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
+  /// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
+  /// function:
+  /// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
+  ///   TaskFunction(gtid, tt->part_id, tt->shareds);
+  ///   return 0;
+  /// }
+  /// 2. Copy a list of shared variables to field shareds of the resulting
+  /// structure kmp_task_t returned by the previous call (if any).
+  /// 3. Copy a pointer to the destructors function to the destructors field of
+  /// the resulting structure kmp_task_t.
+  /// 4. Emit a call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid,
+  /// kmp_task_t *new_task), where new_task is a resulting structure from
+  /// previous items.
+  /// \param D Current task directive.
+  /// \param Tied true if the task is tied (the task is tied to the thread that
+  /// can suspend its task region), false - untied (the task is not tied to any
+  /// thread).
+  /// \param Final Contains either a constant bool value, or an llvm::Value *
+  /// of i1 type for the final clause. If the value is true, the task forces
+  /// all of its child tasks to become final and included tasks.
+  /// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
+  /// /*part_id*/, captured_struct */*__context*/);
+  /// \param SharedsTy A type which contains references to the shared variables.
+  /// \param Shareds Context with the list of shared variables from the \a
+  /// TaskFunction.
+  /// \param IfCond Not a nullptr if the 'if' clause was specified, nullptr
+  /// otherwise.
+  /// \param PrivateVars List of references to private variables for the task
+  /// directive.
+  /// \param PrivateCopies List of private copies for each private variable in
+  /// \p PrivateVars.
+  /// \param FirstprivateVars List of references to firstprivate variables for
+  /// the task directive.
+  /// \param FirstprivateCopies List of private copies for each firstprivate
+  /// variable in \p FirstprivateVars.
+  /// \param FirstprivateInits List of references to auto generated variables
+  /// used for initialization of a single array element. Used if firstprivate
+  /// variable is of array type.
+  virtual void emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
+                            const OMPExecutableDirective &D, bool Tied,
+                            llvm::PointerIntPair<llvm::Value *, 1, bool> Final,
+                            llvm::Value *TaskFunction, QualType SharedsTy,
+                            llvm::Value *Shareds, const Expr *IfCond,
+                            const ArrayRef<const Expr *> PrivateVars,
+                            const ArrayRef<const Expr *> PrivateCopies,
+                            const ArrayRef<const Expr *> FirstprivateVars,
+                            const ArrayRef<const Expr *> FirstprivateCopies,
+                            const ArrayRef<const Expr *> FirstprivateInits);
+
+  /// \brief Emit code for the directive that does not require outlining.
+  ///
+  /// \param CodeGen Code generation sequence for the \a D directive.
+  virtual void emitInlinedDirective(CodeGenFunction &CGF,
+                                    const RegionCodeGenTy &CodeGen);
+  /// \brief Emit code for the reduction clause. The following code is emitted
+  /// for the reduction:
+  /// \code
+  ///
+  /// static kmp_critical_name lock = { 0 };
+  ///
+  /// void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
+  ///  ...
+  ///  *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]);
+  ///  ...
+  /// }
+  ///
+  /// ...
+  /// void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n>-1]};
+  /// switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
+  /// RedList, reduce_func, &<lock>)) {
+  /// case 1:
+  ///  ...
+  ///  <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
+  ///  ...
+  ///  __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
+  ///  break;
+  /// case 2:
+  ///  ...
+  ///  Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
+  ///  ...
+  ///  break;
+  /// default:;
+  /// }
+  /// \endcode
+  ///
+  /// \param LHSExprs List of LHS in \a ReductionOps reduction operations.
+  /// \param RHSExprs List of RHS in \a ReductionOps reduction operations.
+  /// \param ReductionOps List of reduction operations in form 'LHS binop RHS'
+  /// or 'operator binop(LHS, RHS)'.
+  /// \param WithNowait true if the parent directive also has a 'nowait'
+  /// clause, false otherwise.
+  virtual void emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
+                             ArrayRef<const Expr *> LHSExprs,
+                             ArrayRef<const Expr *> RHSExprs,
+                             ArrayRef<const Expr *> ReductionOps,
+                             bool WithNowait);
+
+  /// \brief Emit code for 'taskwait' directive.
+  virtual void emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc);
 };
+
 } // namespace CodeGen
 } // namespace clang
 
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayout.h b/contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayout.h
index 2de0b2f..c15f9fd 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayout.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayout.h
@@ -109,8 +109,8 @@ struct CGBitFieldInfo {
 class CGRecordLayout {
   friend class CodeGenTypes;
 
-  CGRecordLayout(const CGRecordLayout &) LLVM_DELETED_FUNCTION;
-  void operator=(const CGRecordLayout &) LLVM_DELETED_FUNCTION;
+  CGRecordLayout(const CGRecordLayout &) = delete;
+  void operator=(const CGRecordLayout &) = delete;
 
 private:
   /// The LLVM type corresponding to this record layout; used when
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
index 7ad394b..72ecd65 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
@@ -99,10 +99,25 @@ struct CGRecordLowering {
   MemberInfo StorageInfo(CharUnits Offset, llvm::Type *Data) {
     return MemberInfo(Offset, MemberInfo::Field, Data);
   }
-  bool useMSABI() {
+
+  /// The Microsoft bitfield layout rule allocates discrete storage
+  /// units of the field's formal type and only combines adjacent
+  /// fields of the same formal type.  We want to emit a layout with
+  /// these discrete storage units instead of combining them into a
+  /// continuous run.
+  bool isDiscreteBitFieldABI() {
     return Context.getTargetInfo().getCXXABI().isMicrosoft() ||
            D->isMsStruct(Context);
   }
+
+  /// The Itanium base layout rule allows virtual bases to overlap
+  /// other bases, which complicates layout in specific ways.
+  ///
+  /// Note specifically that the ms_struct attribute doesn't change this.
+  bool isOverlappingVBaseABI() {
+    return !Context.getTargetInfo().getCXXABI().isMicrosoft();
+  }
+
   /// \brief Wraps llvm::Type::getIntNTy with some implicit arguments.
   llvm::Type *getIntNType(uint64_t NumBits) {
     return llvm::Type::getIntNTy(Types.getLLVMContext(),
@@ -119,8 +134,9 @@ struct CGRecordLowering {
   /// for itanium bitfields that are smaller than their declared type.
   llvm::Type *getStorageType(const FieldDecl *FD) {
     llvm::Type *Type = Types.ConvertTypeForMem(FD->getType());
-    return useMSABI() || !FD->isBitField() ? Type :
-        getIntNType(std::min(FD->getBitWidthValue(Context),
+    if (!FD->isBitField()) return Type;
+    if (isDiscreteBitFieldABI()) return Type;
+    return getIntNType(std::min(FD->getBitWidthValue(Context),
                              (unsigned)Context.toBits(getSize(Type))));
   }
   /// \brief Gets the llvm Basesubobject type from a CXXRecordDecl.
@@ -137,15 +153,10 @@ struct CGRecordLowering {
     return CharUnits::fromQuantity(DataLayout.getABITypeAlignment(Type));
   }
   bool isZeroInitializable(const FieldDecl *FD) {
-    const Type *Type = FD->getType()->getBaseElementTypeUnsafe();
-    if (const MemberPointerType *MPT = Type->getAs<MemberPointerType>())
-      return Types.getCXXABI().isZeroInitializable(MPT);
-    if (const RecordType *RT = Type->getAs<RecordType>())
-      return isZeroInitializable(RT->getDecl());
-    return true;
+    return Types.isZeroInitializable(FD->getType());
   }
   bool isZeroInitializable(const RecordDecl *RD) {
-    return Types.getCGRecordLayout(RD).isZeroInitializable();
+    return Types.isZeroInitializable(RD);
   }
   void appendPaddingBytes(CharUnits Size) {
     if (!Size.isZero())
@@ -198,8 +209,8 @@ struct CGRecordLowering {
   bool IsZeroInitializableAsBase : 1;
   bool Packed : 1;
 private:
-  CGRecordLowering(const CGRecordLowering &) LLVM_DELETED_FUNCTION;
-  void operator =(const CGRecordLowering &) LLVM_DELETED_FUNCTION;
+  CGRecordLowering(const CGRecordLowering &) = delete;
+  void operator =(const CGRecordLowering &) = delete;
 };
 } // namespace {
@@ -365,7 +376,7 @@ CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
   // used to determine if the ASTRecordLayout is treating these two bitfields as
   // contiguous.  StartBitOffset is offset of the beginning of the Run.
   uint64_t StartBitOffset, Tail = 0;
-  if (useMSABI()) {
+  if (isDiscreteBitFieldABI()) {
     for (; Field != FieldEnd; ++Field) {
       uint64_t BitOffset = getFieldBitOffset(*Field);
       // Zero-width bitfields end runs.
@@ -438,8 +449,12 @@ void CGRecordLowering::accumulateBases() {
   for (const auto &Base : RD->bases()) {
     if (Base.isVirtual())
       continue;
+
+    // Bases can be zero-sized even if not technically empty if they
+    // contain only a trailing array member.
     const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
-    if (!BaseDecl->isEmpty())
+    if (!BaseDecl->isEmpty() &&
+        !Context.getASTRecordLayout(BaseDecl).getSize().isZero())
       Members.push_back(MemberInfo(Layout.getBaseClassOffset(BaseDecl),
                                    MemberInfo::Base, getStorageType(BaseDecl),
                                    BaseDecl));
   }
@@ -461,7 +476,7 @@ void CGRecordLowering::accumulateVBases() {
   // smaller than the nvsize.  Here we check to see if such a base is placed
   // before the nvsize and set the scissor offset to that, instead of the
   // nvsize.
-  if (!useMSABI())
+  if (isOverlappingVBaseABI())
     for (const auto &Base : RD->vbases()) {
       const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
       if (BaseDecl->isEmpty())
@@ -482,7 +497,8 @@ void CGRecordLowering::accumulateVBases() {
     CharUnits Offset = Layout.getVBaseClassOffset(BaseDecl);
     // If the vbase is a primary virtual base of some base, then it doesn't
     // get its own storage location but instead lives inside of that base.
-    if (!useMSABI() && Context.isNearlyEmpty(BaseDecl) &&
+    if (isOverlappingVBaseABI() &&
+        Context.isNearlyEmpty(BaseDecl) &&
         !hasOwnStorage(RD, BaseDecl)) {
       Members.push_back(MemberInfo(Offset, MemberInfo::VBase, nullptr,
                                    BaseDecl));
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGStmt.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGStmt.cpp
index e3bdf86..c879750 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGStmt.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGStmt.cpp
@@ -88,6 +88,7 @@ void CodeGenFunction::EmitStmt(const Stmt *S) {
   case Stmt::ContinueStmtClass:
   case Stmt::DefaultStmtClass:
   case Stmt::CaseStmtClass:
+  case Stmt::SEHLeaveStmtClass:
     llvm_unreachable("should have emitted these statements as simple");
 
 #define STMT(Type, Base)
@@ -173,9 +174,6 @@ void CodeGenFunction::EmitStmt(const Stmt *S) {
   case Stmt::SEHTryStmtClass:
     EmitSEHTryStmt(cast<SEHTryStmt>(*S));
     break;
-  case Stmt::SEHLeaveStmtClass:
-    EmitSEHLeaveStmt(cast<SEHLeaveStmt>(*S));
-    break;
   case Stmt::OMPParallelDirectiveClass:
     EmitOMPParallelDirective(cast<OMPParallelDirective>(*S));
     break;
@@ -256,6 +254,7 @@ bool CodeGenFunction::EmitSimpleStmt(const Stmt *S) {
   case Stmt::ContinueStmtClass: EmitContinueStmt(cast<ContinueStmt>(*S)); break;
   case Stmt::DefaultStmtClass: EmitDefaultStmt(cast<DefaultStmt>(*S)); break;
   case Stmt::CaseStmtClass: EmitCaseStmt(cast<CaseStmt>(*S)); break;
+  case Stmt::SEHLeaveStmtClass: EmitSEHLeaveStmt(cast<SEHLeaveStmt>(*S)); break;
   }
 
   return true;
@@ -424,9 +423,8 @@ void CodeGenFunction::EmitLabel(const LabelDecl *D) {
     ResolveBranchFixups(Dest.getBlock());
   }
 
-  RegionCounter Cnt = getPGORegionCounter(D->getStmt());
   EmitBlock(Dest.getBlock());
-  Cnt.beginRegion(Builder);
+  incrementProfileCounter(D->getStmt());
 }
 
 /// Change the cleanup scope of the labels in this lexical scope to
@@ -514,7 +512,6 @@ void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
   // C99 6.8.4.1: The first substatement is executed if the expression compares
   // unequal to 0.  The condition must be a scalar type.
   LexicalScope ConditionScope(*this, S.getCond()->getSourceRange());
-  RegionCounter Cnt = getPGORegionCounter(&S);
 
   if (S.getConditionVariable())
     EmitAutoVarDecl(*S.getConditionVariable());
@@ -533,7 +530,7 @@ void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
     // This avoids emitting dead code and simplifies the CFG substantially.
     if (!ContainsLabel(Skipped)) {
       if (CondConstant)
-        Cnt.beginRegion(Builder);
+        incrementProfileCounter(&S);
       if (Executed) {
         RunCleanupsScope ExecutedScope(*this);
         EmitStmt(Executed);
@@ -550,11 +547,12 @@ void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
   if (S.getElse())
     ElseBlock = createBasicBlock("if.else");
 
-  EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock, Cnt.getCount());
+  EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock,
+                       getProfileCount(S.getThen()));
 
   // Emit the 'then' code.
   EmitBlock(ThenBlock);
-  Cnt.beginRegion(Builder);
+  incrementProfileCounter(&S);
   {
     RunCleanupsScope ThenScope(*this);
     EmitStmt(S.getThen());
@@ -564,8 +562,8 @@ void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
   // Emit the 'else' code if present.
   if (const Stmt *Else = S.getElse()) {
     {
-      // There is no need to emit line number for unconditional branch.
-      ApplyDebugLocation DL(*this);
+      // There is no need to emit line number for an unconditional branch.
+      auto NL = ApplyDebugLocation::CreateEmpty(*this);
       EmitBlock(ElseBlock);
     }
     {
@@ -573,8 +571,8 @@ void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
       EmitStmt(Else);
     }
     {
-      // There is no need to emit line number for unconditional branch.
-      ApplyDebugLocation DL(*this);
+      // There is no need to emit line number for an unconditional branch.
+      auto NL = ApplyDebugLocation::CreateEmpty(*this);
       EmitBranch(ContBlock);
     }
   }
@@ -679,8 +677,6 @@ void CodeGenFunction::EmitCondBrHints(llvm::LLVMContext &Context,
 
 void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
                                     ArrayRef<const Attr *> WhileAttrs) {
-  RegionCounter Cnt = getPGORegionCounter(&S);
-
   // Emit the header for the loop, which will also become
   // the continue target.
   JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
@@ -725,9 +721,9 @@ void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
     llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
     if (ConditionScope.requiresCleanups())
       ExitBlock = createBasicBlock("while.exit");
-    llvm::BranchInst *CondBr =
-        Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock,
-                             PGO.createLoopWeights(S.getCond(), Cnt));
+    llvm::BranchInst *CondBr = Builder.CreateCondBr(
+        BoolCondVal, LoopBody, ExitBlock,
+        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody())));
 
     if (ExitBlock != LoopExit.getBlock()) {
       EmitBlock(ExitBlock);
@@ -743,7 +739,7 @@ void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
   {
     RunCleanupsScope BodyScope(*this);
     EmitBlock(LoopBody);
-    Cnt.beginRegion(Builder);
+    incrementProfileCounter(&S);
     EmitStmt(S.getBody());
   }
 
@@ -772,7 +768,7 @@ void CodeGenFunction::EmitDoStmt(const DoStmt &S,
   JumpDest LoopExit = getJumpDestInCurrentScope("do.end");
   JumpDest LoopCond = getJumpDestInCurrentScope("do.cond");
 
-  RegionCounter Cnt = getPGORegionCounter(&S);
+  uint64_t ParentCount = getCurrentProfileCount();
 
   // Store the blocks to use for break and continue.
   BreakContinueStack.push_back(BreakContinue(LoopExit, LoopCond));
@@ -782,7 +778,7 @@ void CodeGenFunction::EmitDoStmt(const DoStmt &S,
 
   LoopStack.push(LoopBody);
 
-  EmitBlockWithFallThrough(LoopBody, Cnt);
+  EmitBlockWithFallThrough(LoopBody, &S);
   {
     RunCleanupsScope BodyScope(*this);
     EmitStmt(S.getBody());
@@ -809,9 +805,10 @@ void CodeGenFunction::EmitDoStmt(const DoStmt &S,
 
   // As long as the condition is true, iterate the loop.
   if (EmitBoolCondBranch) {
-    llvm::BranchInst *CondBr =
-        Builder.CreateCondBr(BoolCondVal, LoopBody, LoopExit.getBlock(),
-                             PGO.createLoopWeights(S.getCond(), Cnt));
+    uint64_t BackedgeCount = getProfileCount(S.getBody()) - ParentCount;
+    llvm::BranchInst *CondBr = Builder.CreateCondBr(
+        BoolCondVal, LoopBody, LoopExit.getBlock(),
+        createProfileWeightsForLoop(S.getCond(), BackedgeCount));
 
     // Attach metadata to loop body conditional branch.
     EmitCondBrHints(LoopBody->getContext(), CondBr, DoAttrs);
@@ -838,8 +835,6 @@ void CodeGenFunction::EmitForStmt(const ForStmt &S,
   if (S.getInit())
     EmitStmt(S.getInit());
 
-  RegionCounter Cnt = getPGORegionCounter(&S);
-
   // Start the loop with a block that tests the condition.
   // If there's an increment, the continue scope will be overwritten
   // later.
@@ -881,9 +876,9 @@ void CodeGenFunction::EmitForStmt(const ForStmt &S,
 
     // C99 6.8.5p2/p4: The first substatement is executed if the expression
    // compares unequal to 0.  The condition must be a scalar type.
     llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
-    llvm::BranchInst *CondBr =
-        Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock,
-                             PGO.createLoopWeights(S.getCond(), Cnt));
+    llvm::BranchInst *CondBr = Builder.CreateCondBr(
+        BoolCondVal, ForBody, ExitBlock,
+        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody())));
 
     // Attach metadata to loop body conditional branch.
     EmitCondBrHints(ForBody->getContext(), CondBr, ForAttrs);
@@ -898,7 +893,7 @@ void CodeGenFunction::EmitForStmt(const ForStmt &S,
     // Treat it as a non-zero constant.  Don't even create a new block for the
     // body, just fall into it.
   }
-  Cnt.beginRegion(Builder);
+  incrementProfileCounter(&S);
 
   {
     // Create a separate cleanup scope for the body, in case it is not
@@ -939,8 +934,6 @@ CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S,
   EmitStmt(S.getRangeStmt());
   EmitStmt(S.getBeginEndStmt());
 
-  RegionCounter Cnt = getPGORegionCounter(&S);
-
   // Start the loop with a block that tests the condition.
   // If there's an increment, the continue scope will be overwritten
   // later.
@@ -962,7 +955,8 @@ CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S,
   // to bool, is true.
   llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
   llvm::BranchInst *CondBr = Builder.CreateCondBr(
-      BoolCondVal, ForBody, ExitBlock, PGO.createLoopWeights(S.getCond(), Cnt));
+      BoolCondVal, ForBody, ExitBlock,
+      createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody())));
 
   // Attach metadata to loop body conditional branch.
   EmitCondBrHints(ForBody->getContext(), CondBr, ForAttrs);
@@ -973,7 +967,7 @@ CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S,
   }
 
   EmitBlock(ForBody);
-  Cnt.beginRegion(Builder);
+  incrementProfileCounter(&S);
 
   // Create a block for the increment. In case of a 'continue', we jump there.
   JumpDest Continue = getJumpDestInCurrentScope("for.inc");
@@ -1022,6 +1016,12 @@ void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
 /// if the function returns void, or may be missing one if the function returns
 /// non-void.  Fun stuff :).
 void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
+  // Returning from an outlined SEH helper is UB, and we already warn on it.
+  if (IsOutlinedSEHHelper) {
+    Builder.CreateUnreachable();
+    Builder.ClearInsertionPoint();
+  }
+
   // Emit the result value, even if unused, to evaluate the side effects.
   const Expr *RV = S.getRetValue();
 
@@ -1133,13 +1133,11 @@ void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S) {
   llvm::APSInt LHS = S.getLHS()->EvaluateKnownConstInt(getContext());
   llvm::APSInt RHS = S.getRHS()->EvaluateKnownConstInt(getContext());
 
-  RegionCounter CaseCnt = getPGORegionCounter(&S);
-
   // Emit the code for this case. We do this first to make sure it is
   // properly chained from our predecessor before generating the
   // switch machinery to enter this block.
   llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
-  EmitBlockWithFallThrough(CaseDest, CaseCnt);
+  EmitBlockWithFallThrough(CaseDest, &S);
   EmitStmt(S.getSubStmt());
 
   // If range is empty, do nothing.
@@ -1150,7 +1148,7 @@ void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S) {
   // FIXME: parameters such as this should not be hardcoded.
   if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
     // Range is small enough to add multiple switch instruction cases.
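    // (Editorial sketch, not part of the patch: the lines below spread the
    // single region count for the whole range over the expanded cases while
    // preserving the total. E.g. a count of 5 over 'case 1 ... 3:' (three
    // cases) ends up distributed as weights such as 2, 2 and 1; the exact
    // remainder placement is an assumption here, only the invariant that the
    // per-case weights sum back to the original count matters for PGO.)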
-    uint64_t Total = CaseCnt.getCount();
+    uint64_t Total = getProfileCount(&S);
     unsigned NCases = Range.getZExtValue() + 1;
     // We only have one region counter for the entire set of cases here, so we
     // need to divide the weights evenly between the generated cases, ensuring
@@ -1189,9 +1187,9 @@ void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S) {
 
   llvm::MDNode *Weights = nullptr;
   if (SwitchWeights) {
-    uint64_t ThisCount = CaseCnt.getCount();
+    uint64_t ThisCount = getProfileCount(&S);
     uint64_t DefaultCount = (*SwitchWeights)[0];
-    Weights = PGO.createBranchWeights(ThisCount, DefaultCount);
+    Weights = createProfileWeights(ThisCount, DefaultCount);
 
     // Since we're chaining the switch default through each large case range, we
     // need to update the weight for the default, ie, the first case, to include
@@ -1224,7 +1222,6 @@ void CodeGenFunction::EmitCaseStmt(const CaseStmt &S) {
     return;
   }
 
-  RegionCounter CaseCnt = getPGORegionCounter(&S);
   llvm::ConstantInt *CaseVal =
     Builder.getInt(S.getLHS()->EvaluateKnownConstInt(getContext()));
 
@@ -1239,7 +1236,7 @@ void CodeGenFunction::EmitCaseStmt(const CaseStmt &S) {
     // Only do this optimization if there are no cleanups that need emitting.
     if (isObviouslyBranchWithoutCleanups(Block)) {
       if (SwitchWeights)
-        SwitchWeights->push_back(CaseCnt.getCount());
+        SwitchWeights->push_back(getProfileCount(&S));
       SwitchInsn->addCase(CaseVal, Block.getBlock());
 
       // If there was a fallthrough into this case, make sure to redirect it to
@@ -1253,9 +1250,9 @@ void CodeGenFunction::EmitCaseStmt(const CaseStmt &S) {
   }
 
   llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
-  EmitBlockWithFallThrough(CaseDest, CaseCnt);
+  EmitBlockWithFallThrough(CaseDest, &S);
   if (SwitchWeights)
-    SwitchWeights->push_back(CaseCnt.getCount());
+    SwitchWeights->push_back(getProfileCount(&S));
   SwitchInsn->addCase(CaseVal, CaseDest);
 
   // Recursively emitting the statement is acceptable, but is not wonderful for
@@ -1276,12 +1273,11 @@ void CodeGenFunction::EmitCaseStmt(const CaseStmt &S) {
     llvm::ConstantInt *CaseVal =
       Builder.getInt(CurCase->getLHS()->EvaluateKnownConstInt(getContext()));
 
-    CaseCnt = getPGORegionCounter(NextCase);
     if (SwitchWeights)
-      SwitchWeights->push_back(CaseCnt.getCount());
+      SwitchWeights->push_back(getProfileCount(NextCase));
     if (CGM.getCodeGenOpts().ProfileInstrGenerate) {
       CaseDest = createBasicBlock("sw.bb");
-      EmitBlockWithFallThrough(CaseDest, CaseCnt);
+      EmitBlockWithFallThrough(CaseDest, &S);
     }
 
     SwitchInsn->addCase(CaseVal, CaseDest);
@@ -1297,8 +1293,7 @@ void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S) {
   assert(DefaultBlock->empty() &&
          "EmitDefaultStmt: Default block already defined?");
 
-  RegionCounter Cnt = getPGORegionCounter(&S);
-  EmitBlockWithFallThrough(DefaultBlock, Cnt);
+  EmitBlockWithFallThrough(DefaultBlock, &S);
 
   EmitStmt(S.getSubStmt());
 }
@@ -1520,10 +1515,8 @@ void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
   const SwitchCase *Case = nullptr;
   if (FindCaseStatementsForValue(S, ConstantCondValue, CaseStmts,
                                  getContext(), Case)) {
-    if (Case) {
-      RegionCounter CaseCnt = getPGORegionCounter(Case);
-      CaseCnt.beginRegion(Builder);
-    }
+    if (Case)
+      incrementProfileCounter(Case);
     RunCleanupsScope ExecutedScope(*this);
 
     // Emit the condition variable if needed inside the entire cleanup scope
@@ -1540,8 +1533,7 @@ void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
     // specified series of statements and we're good.
     for (unsigned i = 0, e = CaseStmts.size(); i != e; ++i)
       EmitStmt(CaseStmts[i]);
-    RegionCounter ExitCnt = getPGORegionCounter(&S);
-    ExitCnt.beginRegion(Builder);
+    incrementProfileCounter(&S);
 
     // Now we want to restore the saved switch instance so that nested
     // switches continue to function properly
@@ -1572,7 +1564,7 @@ void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
          Case;
          Case = Case->getNextSwitchCase()) {
       if (isa<DefaultStmt>(Case))
-        DefaultCount = getPGORegionCounter(Case).getCount();
+        DefaultCount = getProfileCount(Case);
       NumCases += 1;
     }
     SwitchWeights = new SmallVector<uint64_t, 16>();
@@ -1621,8 +1613,7 @@ void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
 
   // Emit continuation.
   EmitBlock(SwitchExit.getBlock(), true);
-  RegionCounter ExitCnt = getPGORegionCounter(&S);
-  ExitCnt.beginRegion(Builder);
+  incrementProfileCounter(&S);
 
   if (SwitchWeights) {
     assert(SwitchWeights->size() == 1 + SwitchInsn->getNumCases() &&
@@ -1630,7 +1621,7 @@ void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
     // If there's only one jump destination there's no sense weighting it.
     if (SwitchWeights->size() > 1)
       SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
-                              PGO.createBranchWeights(*SwitchWeights));
+                              createProfileWeights(*SwitchWeights));
     delete SwitchWeights;
   }
   SwitchInsn = SavedSwitchInsn;
@@ -1696,7 +1687,7 @@ SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
 static std::string
 AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
                        const TargetInfo &Target, CodeGenModule &CGM,
-                       const AsmStmt &Stmt) {
+                       const AsmStmt &Stmt, const bool EarlyClobber) {
   const DeclRefExpr *AsmDeclRef = dyn_cast<DeclRefExpr>(&AsmExpr);
   if (!AsmDeclRef)
     return Constraint;
@@ -1721,7 +1712,7 @@ AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
   }
   // Canonicalize the register here before returning it.
   Register = Target.getNormalizedGCCRegisterName(Register);
-  return "{" + Register.str() + "}";
+  return (EarlyClobber ? "&{" : "{") + Register.str() + "}";
 }
 
 llvm::Value*
@@ -1854,7 +1845,8 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
       OutExpr = OutExpr->IgnoreParenNoopCasts(getContext());
 
       OutputConstraint = AddVariableConstraints(OutputConstraint, *OutExpr,
-                                                getTarget(), CGM, S);
+                                                getTarget(), CGM, S,
+                                                Info.earlyClobber());
 
       LValue Dest = EmitLValue(OutExpr);
       if (!Constraints.empty())
@@ -1956,10 +1948,9 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
       InputConstraint = SimplifyConstraint(InputConstraint.c_str(), getTarget(),
                                            &OutputConstraintInfos);
 
-      InputConstraint =
-          AddVariableConstraints(InputConstraint,
-                                 *InputExpr->IgnoreParenNoopCasts(getContext()),
-                                 getTarget(), CGM, S);
+      InputConstraint = AddVariableConstraints(
+          InputConstraint, *InputExpr->IgnoreParenNoopCasts(getContext()),
+          getTarget(), CGM, S, false /* No EarlyClobber */);
 
       llvm::Value *Arg = EmitAsmInput(Info, InputExpr, Constraints);
 
@@ -2187,6 +2178,8 @@ CodeGenFunction::GenerateCapturedStmtFunction(const CapturedStmt &S) {
     llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
                            CapturedStmtInfo->getHelperName(), &CGM.getModule());
   CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
+  if (CD->isNothrow())
+    F->addFnAttr(llvm::Attribute::NoUnwind);
 
   // Generate the function.
   StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args,
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGStmtOpenMP.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGStmtOpenMP.cpp
index 78fd37c..07fc6e9 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGStmtOpenMP.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGStmtOpenMP.cpp
@@ -20,258 +20,473 @@ using namespace clang;
 using namespace CodeGen;
 
-namespace {
-/// \brief RAII for emitting code of CapturedStmt without function outlining.
-class InlinedOpenMPRegion {
-  CodeGenFunction &CGF;
-  CodeGenFunction::CGCapturedStmtInfo *PrevCapturedStmtInfo;
-  const Decl *StoredCurCodeDecl;
-
-  /// \brief A class to emit CapturedStmt construct as inlined statement without
-  /// generating a function for outlined code.
-  class CGInlinedOpenMPRegionInfo : public CodeGenFunction::CGCapturedStmtInfo {
-  public:
-    CGInlinedOpenMPRegionInfo() : CGCapturedStmtInfo() {}
-  };
-
-public:
-  InlinedOpenMPRegion(CodeGenFunction &CGF, const Stmt *S)
-      : CGF(CGF), PrevCapturedStmtInfo(CGF.CapturedStmtInfo),
-        StoredCurCodeDecl(CGF.CurCodeDecl) {
-    CGF.CurCodeDecl = cast<CapturedStmt>(S)->getCapturedDecl();
-    CGF.CapturedStmtInfo = new CGInlinedOpenMPRegionInfo();
-  }
-  ~InlinedOpenMPRegion() {
-    delete CGF.CapturedStmtInfo;
-    CGF.CapturedStmtInfo = PrevCapturedStmtInfo;
-    CGF.CurCodeDecl = StoredCurCodeDecl;
-  }
-};
-} // namespace
-
 //===----------------------------------------------------------------------===//
 //                              OpenMP Directive Emission
 //===----------------------------------------------------------------------===//
+void CodeGenFunction::EmitOMPAggregateAssign(
+    llvm::Value *DestAddr, llvm::Value *SrcAddr, QualType OriginalType,
+    const llvm::function_ref<void(llvm::Value *, llvm::Value *)> &CopyGen) {
+  // Perform element-by-element initialization.
+  QualType ElementTy;
+  auto SrcBegin = SrcAddr;
+  auto DestBegin = DestAddr;
+  auto ArrayTy = OriginalType->getAsArrayTypeUnsafe();
+  auto NumElements = emitArrayLength(ArrayTy, ElementTy, DestBegin);
+  // Cast from pointer to array type to pointer to single element.
+  SrcBegin = Builder.CreatePointerBitCastOrAddrSpaceCast(SrcBegin,
+                                                         DestBegin->getType());
+  auto DestEnd = Builder.CreateGEP(DestBegin, NumElements);
+  // The basic structure here is a while-do loop.
+  auto BodyBB = createBasicBlock("omp.arraycpy.body");
+  auto DoneBB = createBasicBlock("omp.arraycpy.done");
+  auto IsEmpty =
+      Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arraycpy.isempty");
+  Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);
+
+  // Enter the loop body, making that address the current address.
+  auto EntryBB = Builder.GetInsertBlock();
+  EmitBlock(BodyBB);
+  auto SrcElementCurrent =
+      Builder.CreatePHI(SrcBegin->getType(), 2, "omp.arraycpy.srcElementPast");
+  SrcElementCurrent->addIncoming(SrcBegin, EntryBB);
+  auto DestElementCurrent = Builder.CreatePHI(DestBegin->getType(), 2,
+                                              "omp.arraycpy.destElementPast");
+  DestElementCurrent->addIncoming(DestBegin, EntryBB);
+
+  // Emit copy.
+  CopyGen(DestElementCurrent, SrcElementCurrent);
+
+  // Shift the address forward by one element.
+  auto DestElementNext = Builder.CreateConstGEP1_32(
+      DestElementCurrent, /*Idx0=*/1, "omp.arraycpy.dest.element");
+  auto SrcElementNext = Builder.CreateConstGEP1_32(
+      SrcElementCurrent, /*Idx0=*/1, "omp.arraycpy.src.element");
+  // Check whether we've reached the end.
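  // (Editorial sketch, not part of the patch: for an N-element copy the loop
  // built here has the classic pointer-bump shape, roughly
  //   omp.arraycpy.body:
  //     dest = phi(dest.begin, dest.next); src = phi(src.begin, src.next)
  //     <copy one element via CopyGen>
  //     dest.next = dest + 1; src.next = src + 1
  //     br (dest.next == dest.end) ? omp.arraycpy.done : omp.arraycpy.body
  // with the emptiness check emitted above branching straight to the done
  // block; the value names are illustrative only.)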
+ auto Done = + Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done"); + Builder.CreateCondBr(Done, DoneBB, BodyBB); + DestElementCurrent->addIncoming(DestElementNext, Builder.GetInsertBlock()); + SrcElementCurrent->addIncoming(SrcElementNext, Builder.GetInsertBlock()); + + // Done. + EmitBlock(DoneBB, /*IsFinished=*/true); +} -/// \brief Emits code for OpenMP 'if' clause using specified \a CodeGen -/// function. Here is the logic: -/// if (Cond) { -/// CodeGen(true); -/// } else { -/// CodeGen(false); -/// } -static void EmitOMPIfClause(CodeGenFunction &CGF, const Expr *Cond, - const std::function<void(bool)> &CodeGen) { - CodeGenFunction::LexicalScope ConditionScope(CGF, Cond->getSourceRange()); - - // If the condition constant folds and can be elided, try to avoid emitting - // the condition and the dead arm of the if/else. - bool CondConstant; - if (CGF.ConstantFoldsToSimpleInteger(Cond, CondConstant)) { - CodeGen(CondConstant); - return; - } - - // Otherwise, the condition did not fold, or we couldn't elide it. Just - // emit the conditional branch. - auto ThenBlock = CGF.createBasicBlock(/*name*/ "omp_if.then"); - auto ElseBlock = CGF.createBasicBlock(/*name*/ "omp_if.else"); - auto ContBlock = CGF.createBasicBlock(/*name*/ "omp_if.end"); - CGF.EmitBranchOnBoolExpr(Cond, ThenBlock, ElseBlock, /*TrueCount*/ 0); - - // Emit the 'then' code. - CGF.EmitBlock(ThenBlock); - CodeGen(/*ThenBlock*/ true); - CGF.EmitBranch(ContBlock); - // Emit the 'else' code if present. - { - // There is no need to emit line number for unconditional branch. - ApplyDebugLocation DL(CGF); - CGF.EmitBlock(ElseBlock); - } - CodeGen(/*ThenBlock*/ false); - { - // There is no need to emit line number for unconditional branch. - ApplyDebugLocation DL(CGF); - CGF.EmitBranch(ContBlock); - } - // Emit the continuation block for code after the if. - CGF.EmitBlock(ContBlock, /*IsFinished*/ true); -} - -void CodeGenFunction::EmitOMPAggregateAssign(LValue OriginalAddr, - llvm::Value *PrivateAddr, - const Expr *AssignExpr, - QualType OriginalType, - const VarDecl *VDInit) { - EmitBlock(createBasicBlock(".omp.assign.begin.")); - if (!isa<CXXConstructExpr>(AssignExpr) || isTrivialInitializer(AssignExpr)) { - // Perform simple memcpy. - EmitAggregateAssign(PrivateAddr, OriginalAddr.getAddress(), - AssignExpr->getType()); - } else { - // Perform element-by-element initialization. - QualType ElementTy; - auto SrcBegin = OriginalAddr.getAddress(); - auto DestBegin = PrivateAddr; - auto ArrayTy = OriginalType->getAsArrayTypeUnsafe(); - auto SrcNumElements = emitArrayLength(ArrayTy, ElementTy, SrcBegin); - auto DestNumElements = emitArrayLength(ArrayTy, ElementTy, DestBegin); - auto SrcEnd = Builder.CreateGEP(SrcBegin, SrcNumElements); - auto DestEnd = Builder.CreateGEP(DestBegin, DestNumElements); - // The basic structure here is a do-while loop, because we don't - // need to check for the zero-element case. - auto BodyBB = createBasicBlock("omp.arraycpy.body"); - auto DoneBB = createBasicBlock("omp.arraycpy.done"); - auto IsEmpty = - Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arraycpy.isempty"); - Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB); - - // Enter the loop body, making that address the current address. 
- auto EntryBB = Builder.GetInsertBlock(); - EmitBlock(BodyBB); - auto SrcElementPast = Builder.CreatePHI(SrcBegin->getType(), 2, - "omp.arraycpy.srcElementPast"); - SrcElementPast->addIncoming(SrcEnd, EntryBB); - auto DestElementPast = Builder.CreatePHI(DestBegin->getType(), 2, - "omp.arraycpy.destElementPast"); - DestElementPast->addIncoming(DestEnd, EntryBB); - - // Shift the address back by one element. - auto NegativeOne = llvm::ConstantInt::get(SizeTy, -1, true); - auto DestElement = Builder.CreateGEP(DestElementPast, NegativeOne, - "omp.arraycpy.dest.element"); - auto SrcElement = Builder.CreateGEP(SrcElementPast, NegativeOne, - "omp.arraycpy.src.element"); - { - // Create RunCleanScope to cleanup possible temps. - CodeGenFunction::RunCleanupsScope Init(*this); - // Emit initialization for single element. - LocalDeclMap[VDInit] = SrcElement; - EmitAnyExprToMem(AssignExpr, DestElement, - AssignExpr->getType().getQualifiers(), - /*IsInitializer*/ false); - LocalDeclMap.erase(VDInit); +void CodeGenFunction::EmitOMPCopy(CodeGenFunction &CGF, + QualType OriginalType, llvm::Value *DestAddr, + llvm::Value *SrcAddr, const VarDecl *DestVD, + const VarDecl *SrcVD, const Expr *Copy) { + if (OriginalType->isArrayType()) { + auto *BO = dyn_cast<BinaryOperator>(Copy); + if (BO && BO->getOpcode() == BO_Assign) { + // Perform simple memcpy for simple copying. + CGF.EmitAggregateAssign(DestAddr, SrcAddr, OriginalType); + } else { + // For arrays with complex element types perform element by element + // copying. + CGF.EmitOMPAggregateAssign( + DestAddr, SrcAddr, OriginalType, + [&CGF, Copy, SrcVD, DestVD](llvm::Value *DestElement, + llvm::Value *SrcElement) { + // Working with the single array element, so have to remap + // destination and source variables to corresponding array + // elements. + CodeGenFunction::OMPPrivateScope Remap(CGF); + Remap.addPrivate(DestVD, [DestElement]() -> llvm::Value *{ + return DestElement; + }); + Remap.addPrivate( + SrcVD, [SrcElement]() -> llvm::Value *{ return SrcElement; }); + (void)Remap.Privatize(); + CGF.EmitIgnoredExpr(Copy); + }); } - - // Check whether we've reached the end. - auto Done = - Builder.CreateICmpEQ(DestElement, DestBegin, "omp.arraycpy.done"); - Builder.CreateCondBr(Done, DoneBB, BodyBB); - DestElementPast->addIncoming(DestElement, Builder.GetInsertBlock()); - SrcElementPast->addIncoming(SrcElement, Builder.GetInsertBlock()); - - // Done. - EmitBlock(DoneBB, true); + } else { + // Remap pseudo source variable to private copy. + CodeGenFunction::OMPPrivateScope Remap(CGF); + Remap.addPrivate(SrcVD, [SrcAddr]() -> llvm::Value *{ return SrcAddr; }); + Remap.addPrivate(DestVD, [DestAddr]() -> llvm::Value *{ return DestAddr; }); + (void)Remap.Privatize(); + // Emit copying of the whole variable. 
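    // (Editorial sketch, not part of the patch: 'Copy' is an expression built
    // by Sema over the pseudo source/destination variables, conceptually
    //   DestVD = SrcVD;            // scalar assignment, or
    //   operator=(DestVD, SrcVD);  // class types
    // so privatizing SrcVD/DestVD to SrcAddr/DestAddr above makes the single
    // EmitIgnoredExpr call below perform the copy between the real addresses.)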
+ CGF.EmitIgnoredExpr(Copy); } - EmitBlock(createBasicBlock(".omp.assign.end.")); } -void CodeGenFunction::EmitOMPFirstprivateClause( - const OMPExecutableDirective &D, - CodeGenFunction::OMPPrivateScope &PrivateScope) { - auto PrivateFilter = [](const OMPClause *C) -> bool { - return C->getClauseKind() == OMPC_firstprivate; - }; - for (OMPExecutableDirective::filtered_clause_iterator<decltype(PrivateFilter)> - I(D.clauses(), PrivateFilter); I; ++I) { +bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D, + OMPPrivateScope &PrivateScope) { + llvm::DenseSet<const VarDecl *> EmittedAsFirstprivate; + for (auto &&I = D.getClausesOfKind(OMPC_firstprivate); I; ++I) { auto *C = cast<OMPFirstprivateClause>(*I); auto IRef = C->varlist_begin(); auto InitsRef = C->inits().begin(); for (auto IInit : C->private_copies()) { auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl()); - auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl()); - bool IsRegistered; - if (*InitsRef != nullptr) { - // Emit VarDecl with copy init for arrays. - auto *FD = CapturedStmtInfo->lookup(OrigVD); - LValue Base = MakeNaturalAlignAddrLValue( - CapturedStmtInfo->getContextValue(), - getContext().getTagDeclType(FD->getParent())); - auto OriginalAddr = EmitLValueForField(Base, FD); - auto VDInit = cast<VarDecl>(cast<DeclRefExpr>(*InitsRef)->getDecl()); - IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> llvm::Value * { - auto Emission = EmitAutoVarAlloca(*VD); - // Emit initialization of aggregate firstprivate vars. - EmitOMPAggregateAssign(OriginalAddr, Emission.getAllocatedAddress(), - VD->getInit(), (*IRef)->getType(), VDInit); - EmitAutoVarCleanups(Emission); - return Emission.getAllocatedAddress(); - }); - } else - IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> llvm::Value * { - // Emit private VarDecl with copy init. - EmitDecl(*VD); - return GetAddrOfLocalVar(VD); - }); - assert(IsRegistered && "counter already registered as private"); - // Silence the warning about unused variable. - (void)IsRegistered; + if (EmittedAsFirstprivate.count(OrigVD) == 0) { + EmittedAsFirstprivate.insert(OrigVD); + auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl()); + auto *VDInit = cast<VarDecl>(cast<DeclRefExpr>(*InitsRef)->getDecl()); + bool IsRegistered; + DeclRefExpr DRE( + const_cast<VarDecl *>(OrigVD), + /*RefersToEnclosingVariableOrCapture=*/CapturedStmtInfo->lookup( + OrigVD) != nullptr, + (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc()); + auto *OriginalAddr = EmitLValue(&DRE).getAddress(); + QualType Type = OrigVD->getType(); + if (Type->isArrayType()) { + // Emit VarDecl with copy init for arrays. + // Get the address of the original variable captured in current + // captured region. + IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> llvm::Value *{ + auto Emission = EmitAutoVarAlloca(*VD); + auto *Init = VD->getInit(); + if (!isa<CXXConstructExpr>(Init) || isTrivialInitializer(Init)) { + // Perform simple memcpy. + EmitAggregateAssign(Emission.getAllocatedAddress(), OriginalAddr, + Type); + } else { + EmitOMPAggregateAssign( + Emission.getAllocatedAddress(), OriginalAddr, Type, + [this, VDInit, Init](llvm::Value *DestElement, + llvm::Value *SrcElement) { + // Clean up any temporaries needed by the initialization. + RunCleanupsScope InitScope(*this); + // Emit initialization for single element. 
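The source-level semantics being implemented here, for reference (illustrative):

    #include <omp.h>

    void firstprivateDemo() {
      int A[4] = {1, 2, 3, 4};
      // Each thread receives its own copy of A, copy-initialized from the
      // original before the region body runs (the array path above).
      #pragma omp parallel firstprivate(A)
      {
        A[0] += omp_get_thread_num(); // mutates the thread-local copy only
      }
      // The original A is unchanged here.
    }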
+ LocalDeclMap[VDInit] = SrcElement; + EmitAnyExprToMem(Init, DestElement, + Init->getType().getQualifiers(), + /*IsInitializer*/ false); + LocalDeclMap.erase(VDInit); + }); + } + EmitAutoVarCleanups(Emission); + return Emission.getAllocatedAddress(); + }); + } else { + IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> llvm::Value *{ + // Emit private VarDecl with copy init. + // Remap temp VDInit variable to the address of the original + // variable + // (for proper handling of captured global variables). + LocalDeclMap[VDInit] = OriginalAddr; + EmitDecl(*VD); + LocalDeclMap.erase(VDInit); + return GetAddrOfLocalVar(VD); + }); + } + assert(IsRegistered && + "firstprivate var already registered as private"); + // Silence the warning about unused variable. + (void)IsRegistered; + } ++IRef, ++InitsRef; } } + return !EmittedAsFirstprivate.empty(); } void CodeGenFunction::EmitOMPPrivateClause( const OMPExecutableDirective &D, CodeGenFunction::OMPPrivateScope &PrivateScope) { - auto PrivateFilter = [](const OMPClause *C) -> bool { - return C->getClauseKind() == OMPC_private; - }; - for (OMPExecutableDirective::filtered_clause_iterator<decltype(PrivateFilter)> - I(D.clauses(), PrivateFilter); I; ++I) { + llvm::DenseSet<const VarDecl *> EmittedAsPrivate; + for (auto &&I = D.getClausesOfKind(OMPC_private); I; ++I) { auto *C = cast<OMPPrivateClause>(*I); auto IRef = C->varlist_begin(); for (auto IInit : C->private_copies()) { auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl()); - auto VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl()); + if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) { + auto VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl()); + bool IsRegistered = + PrivateScope.addPrivate(OrigVD, [&]() -> llvm::Value *{ + // Emit private VarDecl with copy init. + EmitDecl(*VD); + return GetAddrOfLocalVar(VD); + }); + assert(IsRegistered && "private var already registered as private"); + // Silence the warning about unused variable. + (void)IsRegistered; + } + ++IRef; + } + } +} + +bool CodeGenFunction::EmitOMPCopyinClause(const OMPExecutableDirective &D) { + // threadprivate_var1 = master_threadprivate_var1; + // operator=(threadprivate_var2, master_threadprivate_var2); + // ... + // __kmpc_barrier(&loc, global_tid); + llvm::DenseSet<const VarDecl *> CopiedVars; + llvm::BasicBlock *CopyBegin = nullptr, *CopyEnd = nullptr; + for (auto &&I = D.getClausesOfKind(OMPC_copyin); I; ++I) { + auto *C = cast<OMPCopyinClause>(*I); + auto IRef = C->varlist_begin(); + auto ISrcRef = C->source_exprs().begin(); + auto IDestRef = C->destination_exprs().begin(); + for (auto *AssignOp : C->assignment_ops()) { + auto *VD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl()); + QualType Type = VD->getType(); + if (CopiedVars.insert(VD->getCanonicalDecl()).second) { + // Get the address of the master variable. + auto *MasterAddr = VD->isStaticLocal() + ? CGM.getStaticLocalDeclAddress(VD) + : CGM.GetAddrOfGlobal(VD); + // Get the address of the threadprivate variable. + auto *PrivateAddr = EmitLValue(*IRef).getAddress(); + if (CopiedVars.size() == 1) { + // At first check if current thread is a master thread. If it is, no + // need to copy data. 
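For reference, the construct this lowers (illustrative):

    #include <omp.h>

    int Counter = 0;
    #pragma omp threadprivate(Counter)

    void copyinDemo() {
      Counter = 42; // the master thread's instance
      // copyin broadcasts the master's value into every other thread's
      // threadprivate instance on region entry; the guard emitted below
      // compares the master and thread-local addresses so the master
      // itself skips the copy.
      #pragma omp parallel copyin(Counter)
      {
        // every thread observes Counter == 42 here
      }
    }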
+ CopyBegin = createBasicBlock("copyin.not.master"); + CopyEnd = createBasicBlock("copyin.not.master.end"); + Builder.CreateCondBr( + Builder.CreateICmpNE( + Builder.CreatePtrToInt(MasterAddr, CGM.IntPtrTy), + Builder.CreatePtrToInt(PrivateAddr, CGM.IntPtrTy)), + CopyBegin, CopyEnd); + EmitBlock(CopyBegin); + } + auto *SrcVD = cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl()); + auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl()); + EmitOMPCopy(*this, Type, PrivateAddr, MasterAddr, DestVD, SrcVD, + AssignOp); + } + ++IRef; + ++ISrcRef; + ++IDestRef; + } + } + if (CopyEnd) { + // Exit out of copying procedure for non-master thread. + EmitBlock(CopyEnd, /*IsFinished=*/true); + return true; + } + return false; +} + +bool CodeGenFunction::EmitOMPLastprivateClauseInit( + const OMPExecutableDirective &D, OMPPrivateScope &PrivateScope) { + bool HasAtLeastOneLastprivate = false; + llvm::DenseSet<const VarDecl *> AlreadyEmittedVars; + for (auto &&I = D.getClausesOfKind(OMPC_lastprivate); I; ++I) { + HasAtLeastOneLastprivate = true; + auto *C = cast<OMPLastprivateClause>(*I); + auto IRef = C->varlist_begin(); + auto IDestRef = C->destination_exprs().begin(); + for (auto *IInit : C->private_copies()) { + // Keep the address of the original variable for future update at the end + // of the loop. + auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl()); + if (AlreadyEmittedVars.insert(OrigVD->getCanonicalDecl()).second) { + auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl()); + PrivateScope.addPrivate(DestVD, [this, OrigVD, IRef]() -> llvm::Value *{ + DeclRefExpr DRE( + const_cast<VarDecl *>(OrigVD), + /*RefersToEnclosingVariableOrCapture=*/CapturedStmtInfo->lookup( + OrigVD) != nullptr, + (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc()); + return EmitLValue(&DRE).getAddress(); + }); + // Check if the variable is also a firstprivate: in this case IInit is + // not generated. Initialization of this variable will happen in codegen + // for 'firstprivate' clause. + if (IInit) { + auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl()); + bool IsRegistered = + PrivateScope.addPrivate(OrigVD, [&]() -> llvm::Value *{ + // Emit private VarDecl with copy init. + EmitDecl(*VD); + return GetAddrOfLocalVar(VD); + }); + assert(IsRegistered && + "lastprivate var already registered as private"); + (void)IsRegistered; + } + } + ++IRef, ++IDestRef; + } + } + return HasAtLeastOneLastprivate; +} + +void CodeGenFunction::EmitOMPLastprivateClauseFinal( + const OMPExecutableDirective &D, llvm::Value *IsLastIterCond) { + // Emit following code: + // if (<IsLastIterCond>) { + // orig_var1 = private_orig_var1; + // ... 
+  //   orig_varn = private_orig_varn;
+  // }
+  auto *ThenBB = createBasicBlock(".omp.lastprivate.then");
+  auto *DoneBB = createBasicBlock(".omp.lastprivate.done");
+  Builder.CreateCondBr(IsLastIterCond, ThenBB, DoneBB);
+  EmitBlock(ThenBB);
+  llvm::DenseMap<const Decl *, const Expr *> LoopCountersAndUpdates;
+  const Expr *LastIterVal = nullptr;
+  const Expr *IVExpr = nullptr;
+  const Expr *IncExpr = nullptr;
+  if (auto *LoopDirective = dyn_cast<OMPLoopDirective>(&D)) {
+    LastIterVal =
+        cast<VarDecl>(cast<DeclRefExpr>(LoopDirective->getUpperBoundVariable())
+                          ->getDecl())
+            ->getAnyInitializer();
+    IVExpr = LoopDirective->getIterationVariable();
+    IncExpr = LoopDirective->getInc();
+    auto IUpdate = LoopDirective->updates().begin();
+    for (auto *E : LoopDirective->counters()) {
+      auto *D = cast<DeclRefExpr>(E)->getDecl()->getCanonicalDecl();
+      LoopCountersAndUpdates[D] = *IUpdate;
+      ++IUpdate;
+    }
+  }
+  {
+    llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
+    bool FirstLCV = true;
+    for (auto &&I = D.getClausesOfKind(OMPC_lastprivate); I; ++I) {
+      auto *C = cast<OMPLastprivateClause>(*I);
+      auto IRef = C->varlist_begin();
+      auto ISrcRef = C->source_exprs().begin();
+      auto IDestRef = C->destination_exprs().begin();
+      for (auto *AssignOp : C->assignment_ops()) {
+        auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
+        QualType Type = PrivateVD->getType();
+        auto *CanonicalVD = PrivateVD->getCanonicalDecl();
+        if (AlreadyEmittedVars.insert(CanonicalVD).second) {
+          // If the lastprivate variable is a loop control variable of a
+          // loop-based directive, update its value before copying it back to
+          // the original variable.
+          if (auto *UpExpr = LoopCountersAndUpdates.lookup(CanonicalVD)) {
+            if (FirstLCV) {
+              EmitAnyExprToMem(LastIterVal, EmitLValue(IVExpr).getAddress(),
+                               IVExpr->getType().getQualifiers(),
+                               /*IsInitializer=*/false);
+              EmitIgnoredExpr(IncExpr);
+              FirstLCV = false;
+            }
+            EmitIgnoredExpr(UpExpr);
+          }
+          auto *SrcVD = cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
+          auto *DestVD = cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
+          // Get the address of the original variable.
+          auto *OriginalAddr = GetAddrOfLocalVar(DestVD);
+          // Get the address of the private variable.
+          auto *PrivateAddr = GetAddrOfLocalVar(PrivateVD);
+          EmitOMPCopy(*this, Type, OriginalAddr, PrivateAddr, DestVD, SrcVD,
+                      AssignOp);
+        }
+        ++IRef;
+        ++ISrcRef;
+        ++IDestRef;
+      }
+    }
+  }
+  EmitBlock(DoneBB, /*IsFinished=*/true);
+}
+
+void CodeGenFunction::EmitOMPReductionClauseInit(
+    const OMPExecutableDirective &D,
+    CodeGenFunction::OMPPrivateScope &PrivateScope) {
+  for (auto &&I = D.getClausesOfKind(OMPC_reduction); I; ++I) {
+    auto *C = cast<OMPReductionClause>(*I);
+    auto ILHS = C->lhs_exprs().begin();
+    auto IRHS = C->rhs_exprs().begin();
+    for (auto IRef : C->varlists()) {
+      auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(IRef)->getDecl());
+      auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
+      auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
+      // Store the address of the original variable associated with the LHS
+      // implicit variable.
+      PrivateScope.addPrivate(LHSVD, [this, OrigVD, IRef]() -> llvm::Value *{
+        DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD),
+                        CapturedStmtInfo->lookup(OrigVD) != nullptr,
+                        IRef->getType(), VK_LValue, IRef->getExprLoc());
+        return EmitLValue(&DRE).getAddress();
+      });
+      // Emit reduction copy.
       bool IsRegistered =
-          PrivateScope.addPrivate(OrigVD, [&]() -> llvm::Value * {
-            // Emit private VarDecl with copy init.
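Source-level behavior implemented by the lastprivate init/final pair above (illustrative):

    void lastprivateDemo(int *Out, int N) {
      int Last = -1;
      #pragma omp parallel for lastprivate(Last)
      for (int I = 0; I < N; ++I) {
        Last = I; // each thread writes its own private copy
        Out[I] = 2 * I;
      }
      // For N > 0, Last == N - 1 here: only the private value from the
      // sequentially last iteration is copied back, which is what the
      // IsLastIterCond guard (.omp.lastprivate.then/.done) selects.
    }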
- EmitDecl(*VD); - return GetAddrOfLocalVar(VD); + PrivateScope.addPrivate(OrigVD, [this, PrivateVD]() -> llvm::Value *{ + // Emit private VarDecl with reduction init. + EmitDecl(*PrivateVD); + return GetAddrOfLocalVar(PrivateVD); }); - assert(IsRegistered && "counter already registered as private"); + assert(IsRegistered && "private var already registered as private"); // Silence the warning about unused variable. (void)IsRegistered; - ++IRef; + ++ILHS, ++IRHS; } } } -/// \brief Emits code for OpenMP parallel directive in the parallel region. -static void EmitOMPParallelCall(CodeGenFunction &CGF, - const OMPParallelDirective &S, - llvm::Value *OutlinedFn, - llvm::Value *CapturedStruct) { - if (auto C = S.getSingleClause(/*K*/ OMPC_num_threads)) { +void CodeGenFunction::EmitOMPReductionClauseFinal( + const OMPExecutableDirective &D) { + llvm::SmallVector<const Expr *, 8> LHSExprs; + llvm::SmallVector<const Expr *, 8> RHSExprs; + llvm::SmallVector<const Expr *, 8> ReductionOps; + bool HasAtLeastOneReduction = false; + for (auto &&I = D.getClausesOfKind(OMPC_reduction); I; ++I) { + HasAtLeastOneReduction = true; + auto *C = cast<OMPReductionClause>(*I); + LHSExprs.append(C->lhs_exprs().begin(), C->lhs_exprs().end()); + RHSExprs.append(C->rhs_exprs().begin(), C->rhs_exprs().end()); + ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end()); + } + if (HasAtLeastOneReduction) { + // Emit nowait reduction if nowait clause is present or directive is a + // parallel directive (it always has implicit barrier). + CGM.getOpenMPRuntime().emitReduction( + *this, D.getLocEnd(), LHSExprs, RHSExprs, ReductionOps, + D.getSingleClause(OMPC_nowait) || + isOpenMPParallelDirective(D.getDirectiveKind())); + } +} + +static void emitCommonOMPParallelDirective(CodeGenFunction &CGF, + const OMPExecutableDirective &S, + const RegionCodeGenTy &CodeGen) { + auto CS = cast<CapturedStmt>(S.getAssociatedStmt()); + auto CapturedStruct = CGF.GenerateCapturedStmtArgument(*CS); + auto OutlinedFn = CGF.CGM.getOpenMPRuntime().emitParallelOutlinedFunction( + S, *CS->getCapturedDecl()->param_begin(), CodeGen); + if (auto C = S.getSingleClause(OMPC_num_threads)) { CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF); auto NumThreadsClause = cast<OMPNumThreadsClause>(C); auto NumThreads = CGF.EmitScalarExpr(NumThreadsClause->getNumThreads(), /*IgnoreResultAssign*/ true); - CGF.CGM.getOpenMPRuntime().EmitOMPNumThreadsClause( + CGF.CGM.getOpenMPRuntime().emitNumThreadsClause( CGF, NumThreads, NumThreadsClause->getLocStart()); } - CGF.CGM.getOpenMPRuntime().EmitOMPParallelCall(CGF, S.getLocStart(), - OutlinedFn, CapturedStruct); + const Expr *IfCond = nullptr; + if (auto C = S.getSingleClause(OMPC_if)) { + IfCond = cast<OMPIfClause>(C)->getCondition(); + } + CGF.CGM.getOpenMPRuntime().emitParallelCall(CGF, S.getLocStart(), OutlinedFn, + CapturedStruct, IfCond); } void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) { - auto CS = cast<CapturedStmt>(S.getAssociatedStmt()); - auto CapturedStruct = GenerateCapturedStmtArgument(*CS); - auto OutlinedFn = CGM.getOpenMPRuntime().EmitOpenMPOutlinedFunction( - S, *CS->getCapturedDecl()->param_begin()); - if (auto C = S.getSingleClause(/*K*/ OMPC_if)) { - auto Cond = cast<OMPIfClause>(C)->getCondition(); - EmitOMPIfClause(*this, Cond, [&](bool ThenBlock) { - if (ThenBlock) - EmitOMPParallelCall(*this, S, OutlinedFn, CapturedStruct); - else - CGM.getOpenMPRuntime().EmitOMPSerialCall(*this, S.getLocStart(), - OutlinedFn, CapturedStruct); - }); - } else 
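The reduction init above pairs with EmitOMPReductionClauseFinal as in this source-level example (illustrative):

    int reductionDemo(const int *A, int N) {
      int Sum = 0;
      // Each thread accumulates into a private copy (the RHS variable);
      // the private copies are combined into the original variable by the
      // runtime's emitReduction call at the end of the region.
      #pragma omp parallel for reduction(+ : Sum)
      for (int I = 0; I < N; ++I)
        Sum += A[I];
      return Sum;
    }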
-    EmitOMPParallelCall(*this, S, OutlinedFn, CapturedStruct);
+  LexicalScope Scope(*this, S.getSourceRange());
+  // Emit parallel region as a standalone region.
+  auto &&CodeGen = [&S](CodeGenFunction &CGF) {
+    OMPPrivateScope PrivateScope(CGF);
+    bool Copyins = CGF.EmitOMPCopyinClause(S);
+    bool Firstprivates = CGF.EmitOMPFirstprivateClause(S, PrivateScope);
+    if (Copyins || Firstprivates) {
+      // Emit an implicit barrier to synchronize threads and avoid data races
+      // on initialization of firstprivate variables, or on propagation of the
+      // master thread's values of threadprivate variables to the local
+      // instances of those variables in all other implicit threads.
+      CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(),
+                                                 OMPD_unknown);
+    }
+    CGF.EmitOMPPrivateClause(S, PrivateScope);
+    CGF.EmitOMPReductionClauseInit(S, PrivateScope);
+    (void)PrivateScope.Privatize();
+    CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
+    CGF.EmitOMPReductionClauseFinal(S);
+    // Emit implicit barrier at the end of the 'parallel' directive.
+    CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(),
+                                               OMPD_unknown);
+  };
+  emitCommonOMPParallelDirective(*this, S, CodeGen);
 }
 
 void CodeGenFunction::EmitOMPLoopBody(const OMPLoopDirective &S,
@@ -281,6 +496,14 @@ void CodeGenFunction::EmitOMPLoopBody(const OMPLoopDirective &S,
   for (auto I : S.updates()) {
     EmitIgnoredExpr(I);
   }
+  // Update the linear variables.
+  for (auto &&I = S.getClausesOfKind(OMPC_linear); I; ++I) {
+    auto *C = cast<OMPLinearClause>(*I);
+    for (auto U : C->updates()) {
+      EmitIgnoredExpr(U);
+    }
+  }
+
   // On a continue in the body, jump to the end.
   auto Continue = getJumpDestInCurrentScope("omp.body.continue");
   BreakContinueStack.push_back(BreakContinue(JumpDest(), Continue));
@@ -297,11 +520,12 @@ void CodeGenFunction::EmitOMPLoopBody(const OMPLoopDirective &S,
   }
 }
 
-void CodeGenFunction::EmitOMPInnerLoop(const OMPLoopDirective &S,
-                                       OMPPrivateScope &LoopScope,
-                                       bool SeparateIter) {
+void CodeGenFunction::EmitOMPInnerLoop(
+    const Stmt &S, bool RequiresCleanup, const Expr *LoopCond,
+    const Expr *IncExpr,
+    const llvm::function_ref<void(CodeGenFunction &)> &BodyGen,
+    const llvm::function_ref<void(CodeGenFunction &)> &PostIncGen) {
   auto LoopExit = getJumpDestInCurrentScope("omp.inner.for.end");
-  auto Cnt = getPGORegionCounter(&S);
 
   // Start the loop with a block that tests the condition.
   auto CondBlock = createBasicBlock("omp.inner.for.cond");
@@ -311,35 +535,31 @@ void CodeGenFunction::EmitOMPInnerLoop(const OMPLoopDirective &S,
   // If there are any cleanups between here and the loop-exit scope,
   // create a block to stage a loop exit along.
   auto ExitBlock = LoopExit.getBlock();
-  if (LoopScope.requiresCleanups())
+  if (RequiresCleanup)
     ExitBlock = createBasicBlock("omp.inner.for.cond.cleanup");
 
   auto LoopBody = createBasicBlock("omp.inner.for.body");
 
-  // Emit condition: "IV < LastIteration + 1 [ - 1]"
-  // ("- 1" when lastprivate clause is present - separate one iteration).
-  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond(SeparateIter));
-  Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock,
-                       PGO.createLoopWeights(S.getCond(SeparateIter), Cnt));
-
+  // Emit condition.
+  EmitBranchOnBoolExpr(LoopCond, LoopBody, ExitBlock, getProfileCount(&S));
   if (ExitBlock != LoopExit.getBlock()) {
     EmitBlock(ExitBlock);
     EmitBranchThroughCleanup(LoopExit);
   }
 
   EmitBlock(LoopBody);
-  Cnt.beginRegion(Builder);
+  incrementProfileCounter(&S);
 
   // Create a block for the increment.
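The inner-loop emitter is now callback-driven; a typical invocation, taken from the static worksharing path later in this patch, looks like the following. The PostIncGen hook is what lets the ordered dynamic loop emit its per-iteration runtime call after the increment:

    EmitOMPInnerLoop(S, LoopScope.requiresCleanups(),
                     S.getCond(/*SeparateIter=*/false), S.getInc(),
                     [&S](CodeGenFunction &CGF) { // BodyGen: body + stop point
                       CGF.EmitOMPLoopBody(S);
                       CGF.EmitStopPoint(&S);
                     },
                     [](CodeGenFunction &) {});   // PostIncGen: nothing extra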
auto Continue = getJumpDestInCurrentScope("omp.inner.for.inc"); BreakContinueStack.push_back(BreakContinue(LoopExit, Continue)); - EmitOMPLoopBody(S); - EmitStopPoint(&S); + BodyGen(*this); // Emit "IV = IV + 1" and a back-edge to the condition block. EmitBlock(Continue.getBlock()); - EmitIgnoredExpr(S.getInc()); + EmitIgnoredExpr(IncExpr); + PostIncGen(*this); BreakContinueStack.pop_back(); EmitBranch(CondBlock); LoopStack.pop(); @@ -350,11 +570,38 @@ void CodeGenFunction::EmitOMPInnerLoop(const OMPLoopDirective &S, void CodeGenFunction::EmitOMPSimdFinal(const OMPLoopDirective &S) { auto IC = S.counters().begin(); for (auto F : S.finals()) { - if (LocalDeclMap.lookup(cast<DeclRefExpr>((*IC))->getDecl())) { + auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>((*IC))->getDecl()); + if (LocalDeclMap.lookup(OrigVD)) { + DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD), + CapturedStmtInfo->lookup(OrigVD) != nullptr, + (*IC)->getType(), VK_LValue, (*IC)->getExprLoc()); + auto *OrigAddr = EmitLValue(&DRE).getAddress(); + OMPPrivateScope VarScope(*this); + VarScope.addPrivate(OrigVD, + [OrigAddr]() -> llvm::Value *{ return OrigAddr; }); + (void)VarScope.Privatize(); EmitIgnoredExpr(F); } ++IC; } + // Emit the final values of the linear variables. + for (auto &&I = S.getClausesOfKind(OMPC_linear); I; ++I) { + auto *C = cast<OMPLinearClause>(*I); + auto IC = C->varlist_begin(); + for (auto F : C->finals()) { + auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl()); + DeclRefExpr DRE(const_cast<VarDecl *>(OrigVD), + CapturedStmtInfo->lookup(OrigVD) != nullptr, + (*IC)->getType(), VK_LValue, (*IC)->getExprLoc()); + auto *OrigAddr = EmitLValue(&DRE).getAddress(); + OMPPrivateScope VarScope(*this); + VarScope.addPrivate(OrigVD, + [OrigAddr]() -> llvm::Value *{ return OrigAddr; }); + (void)VarScope.Privatize(); + EmitIgnoredExpr(F); + ++IC; + } + } } static void EmitOMPAlignedClause(CodeGenFunction &CGF, CodeGenModule &CGM, @@ -388,116 +635,348 @@ static void EmitPrivateLoopCounters(CodeGenFunction &CGF, ArrayRef<Expr *> Counters) { for (auto *E : Counters) { auto VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); - bool IsRegistered = LoopScope.addPrivate(VD, [&]() -> llvm::Value * { + (void)LoopScope.addPrivate(VD, [&]() -> llvm::Value *{ // Emit var without initialization. auto VarEmission = CGF.EmitAutoVarAlloca(*VD); CGF.EmitAutoVarCleanups(VarEmission); return VarEmission.getAllocatedAddress(); }); - assert(IsRegistered && "counter already registered as private"); - // Silence the warning about unused variable. - (void)IsRegistered; } - (void)LoopScope.Privatize(); +} + +static void emitPreCond(CodeGenFunction &CGF, const OMPLoopDirective &S, + const Expr *Cond, llvm::BasicBlock *TrueBlock, + llvm::BasicBlock *FalseBlock, uint64_t TrueCount) { + CodeGenFunction::OMPPrivateScope PreCondScope(CGF); + EmitPrivateLoopCounters(CGF, PreCondScope, S.counters()); + const VarDecl *IVDecl = + cast<VarDecl>(cast<DeclRefExpr>(S.getIterationVariable())->getDecl()); + bool IsRegistered = PreCondScope.addPrivate(IVDecl, [&]() -> llvm::Value *{ + // Emit var without initialization. + auto VarEmission = CGF.EmitAutoVarAlloca(*IVDecl); + CGF.EmitAutoVarCleanups(VarEmission); + return VarEmission.getAllocatedAddress(); + }); + assert(IsRegistered && "counter already registered as private"); + // Silence the warning about unused variable. + (void)IsRegistered; + (void)PreCondScope.Privatize(); + // Initialize internal counter to 0 to calculate initial values of real + // counters. 
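The linear-clause updates and finals handled above implement, at the source level (illustrative):

    void linearDemo(int *Out, const int *In) {
      int J = 0;
      // linear(J : 2): within the loop J is privatized and kept equal to
      // 2 * I (the updates() expressions); after the loop the final value
      // is stored back to the original J (the finals() expressions).
      #pragma omp simd linear(J : 2)
      for (int I = 0; I < 8; ++I) {
        Out[J] = In[I];
        J += 2; // the increment must match the declared linear step
      }
      // J == 16 here.
    }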
+ LValue IV = CGF.EmitLValue(S.getIterationVariable()); + CGF.EmitStoreOfScalar( + llvm::ConstantInt::getNullValue( + IV.getAddress()->getType()->getPointerElementType()), + CGF.EmitLValue(S.getIterationVariable()), /*isInit=*/true); + // Get initial values of real counters. + for (auto I : S.updates()) { + CGF.EmitIgnoredExpr(I); + } + // Check that loop is executed at least one time. + CGF.EmitBranchOnBoolExpr(Cond, TrueBlock, FalseBlock, TrueCount); +} + +static void +EmitPrivateLinearVars(CodeGenFunction &CGF, const OMPExecutableDirective &D, + CodeGenFunction::OMPPrivateScope &PrivateScope) { + for (auto &&I = D.getClausesOfKind(OMPC_linear); I; ++I) { + auto *C = cast<OMPLinearClause>(*I); + for (auto *E : C->varlists()) { + auto VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); + bool IsRegistered = PrivateScope.addPrivate(VD, [&]()->llvm::Value * { + // Emit var without initialization. + auto VarEmission = CGF.EmitAutoVarAlloca(*VD); + CGF.EmitAutoVarCleanups(VarEmission); + return VarEmission.getAllocatedAddress(); + }); + assert(IsRegistered && "linear var already registered as private"); + // Silence the warning about unused variable. + (void)IsRegistered; + } + } } void CodeGenFunction::EmitOMPSimdDirective(const OMPSimdDirective &S) { - // Pragma 'simd' code depends on presence of 'lastprivate'. - // If present, we have to separate last iteration of the loop: + auto &&CodeGen = [&S](CodeGenFunction &CGF) { + // Pragma 'simd' code depends on presence of 'lastprivate'. + // If present, we have to separate last iteration of the loop: + // + // if (PreCond) { + // for (IV in 0..LastIteration-1) BODY; + // BODY with updates of lastprivate vars; + // <Final counter/linear vars updates>; + // } + // + // otherwise (when there's no lastprivate): + // + // if (PreCond) { + // for (IV in 0..LastIteration) BODY; + // <Final counter/linear vars updates>; + // } + // + + // Emit: if (PreCond) - begin. + // If the condition constant folds and can be elided, avoid emitting the + // whole loop. + bool CondConstant; + llvm::BasicBlock *ContBlock = nullptr; + if (CGF.ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) { + if (!CondConstant) + return; + } else { + auto *ThenBlock = CGF.createBasicBlock("simd.if.then"); + ContBlock = CGF.createBasicBlock("simd.if.end"); + emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock, + CGF.getProfileCount(&S)); + CGF.EmitBlock(ThenBlock); + CGF.incrementProfileCounter(&S); + } + // Walk clauses and process safelen/lastprivate. + bool SeparateIter = false; + CGF.LoopStack.setParallel(); + CGF.LoopStack.setVectorizerEnable(true); + for (auto C : S.clauses()) { + switch (C->getClauseKind()) { + case OMPC_safelen: { + RValue Len = CGF.EmitAnyExpr(cast<OMPSafelenClause>(C)->getSafelen(), + AggValueSlot::ignored(), true); + llvm::ConstantInt *Val = cast<llvm::ConstantInt>(Len.getScalarVal()); + CGF.LoopStack.setVectorizerWidth(Val->getZExtValue()); + // In presence of finite 'safelen', it may be unsafe to mark all + // the memory instructions parallel, because loop-carried + // dependences of 'safelen' iterations are possible. + CGF.LoopStack.setParallel(false); + break; + } + case OMPC_aligned: + EmitOMPAlignedClause(CGF, CGF.CGM, cast<OMPAlignedClause>(*C)); + break; + case OMPC_lastprivate: + SeparateIter = true; + break; + default: + // Not handled yet + ; + } + } + + // Emit inits for the linear variables. 
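Why a finite safelen disables the parallel loop metadata, by example (illustrative):

    void safelenDemo(int *A, int N) {
      // The loop carries a dependence of distance 4, so at most 4 lanes may
      // execute in lockstep. Codegen sets the vectorizer width to 4 and
      // clears the parallel flag, exactly as the OMPC_safelen case above.
      #pragma omp simd safelen(4)
      for (int I = 4; I < N; ++I)
        A[I] = A[I - 4] + 1;
    }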
+ for (auto &&I = S.getClausesOfKind(OMPC_linear); I; ++I) { + auto *C = cast<OMPLinearClause>(*I); + for (auto Init : C->inits()) { + auto *D = cast<VarDecl>(cast<DeclRefExpr>(Init)->getDecl()); + CGF.EmitVarDecl(*D); + } + } + + // Emit the loop iteration variable. + const Expr *IVExpr = S.getIterationVariable(); + const VarDecl *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl()); + CGF.EmitVarDecl(*IVDecl); + CGF.EmitIgnoredExpr(S.getInit()); + + // Emit the iterations count variable. + // If it is not a variable, Sema decided to calculate iterations count on + // each iteration (e.g., it is foldable into a constant). + if (auto LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) { + CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl())); + // Emit calculation of the iterations count. + CGF.EmitIgnoredExpr(S.getCalcLastIteration()); + } + + // Emit the linear steps for the linear clauses. + // If a step is not constant, it is pre-calculated before the loop. + for (auto &&I = S.getClausesOfKind(OMPC_linear); I; ++I) { + auto *C = cast<OMPLinearClause>(*I); + if (auto CS = cast_or_null<BinaryOperator>(C->getCalcStep())) + if (auto SaveRef = cast<DeclRefExpr>(CS->getLHS())) { + CGF.EmitVarDecl(*cast<VarDecl>(SaveRef->getDecl())); + // Emit calculation of the linear step. + CGF.EmitIgnoredExpr(CS); + } + } + + { + OMPPrivateScope LoopScope(CGF); + EmitPrivateLoopCounters(CGF, LoopScope, S.counters()); + EmitPrivateLinearVars(CGF, S, LoopScope); + CGF.EmitOMPPrivateClause(S, LoopScope); + (void)LoopScope.Privatize(); + CGF.EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), + S.getCond(SeparateIter), S.getInc(), + [&S](CodeGenFunction &CGF) { + CGF.EmitOMPLoopBody(S); + CGF.EmitStopPoint(&S); + }, + [](CodeGenFunction &) {}); + if (SeparateIter) { + CGF.EmitOMPLoopBody(S, /*SeparateIter=*/true); + } + } + CGF.EmitOMPSimdFinal(S); + // Emit: if (PreCond) - end. + if (ContBlock) { + CGF.EmitBranch(ContBlock); + CGF.EmitBlock(ContBlock, true); + } + }; + CGM.getOpenMPRuntime().emitInlinedDirective(*this, CodeGen); +} + +void CodeGenFunction::EmitOMPForOuterLoop(OpenMPScheduleClauseKind ScheduleKind, + const OMPLoopDirective &S, + OMPPrivateScope &LoopScope, + bool Ordered, llvm::Value *LB, + llvm::Value *UB, llvm::Value *ST, + llvm::Value *IL, llvm::Value *Chunk) { + auto &RT = CGM.getOpenMPRuntime(); + + // Dynamic scheduling of the outer loop (dynamic, guided, auto, runtime). + const bool DynamicOrOrdered = Ordered || RT.isDynamic(ScheduleKind); + + assert((Ordered || + !RT.isStaticNonchunked(ScheduleKind, /*Chunked=*/Chunk != nullptr)) && + "static non-chunked schedule does not need outer loop"); + + // Emit outer loop. + // + // OpenMP [2.7.1, Loop Construct, Description, table 2-1] + // When schedule(dynamic,chunk_size) is specified, the iterations are + // distributed to threads in the team in chunks as the threads request them. + // Each thread executes a chunk of iterations, then requests another chunk, + // until no chunks remain to be distributed. Each chunk contains chunk_size + // iterations, except for the last chunk to be distributed, which may have + // fewer iterations. When no chunk_size is specified, it defaults to 1. // - // if (LastIteration != 0) { - // for (IV in 0..LastIteration-1) BODY; - // BODY with updates of lastprivate vars; - // <Final counter/linear vars updates>; + // When schedule(guided,chunk_size) is specified, the iterations are assigned + // to threads in the team in chunks as the executing threads request them. 
+ // Each thread executes a chunk of iterations, then requests another chunk, + // until no chunks remain to be assigned. For a chunk_size of 1, the size of + // each chunk is proportional to the number of unassigned iterations divided + // by the number of threads in the team, decreasing to 1. For a chunk_size + // with value k (greater than 1), the size of each chunk is determined in the + // same way, with the restriction that the chunks do not contain fewer than k + // iterations (except for the last chunk to be assigned, which may have fewer + // than k iterations). + // + // When schedule(auto) is specified, the decision regarding scheduling is + // delegated to the compiler and/or runtime system. The programmer gives the + // implementation the freedom to choose any possible mapping of iterations to + // threads in the team. + // + // When schedule(runtime) is specified, the decision regarding scheduling is + // deferred until run time, and the schedule and chunk size are taken from the + // run-sched-var ICV. If the ICV is set to auto, the schedule is + // implementation defined + // + // while(__kmpc_dispatch_next(&LB, &UB)) { + // idx = LB; + // while (idx <= UB) { BODY; ++idx; + // __kmpc_dispatch_fini_(4|8)[u](); // For ordered loops only. + // } // inner loop // } // - // otherwise (when there's no lastprivate): + // OpenMP [2.7.1, Loop Construct, Description, table 2-1] + // When schedule(static, chunk_size) is specified, iterations are divided into + // chunks of size chunk_size, and the chunks are assigned to the threads in + // the team in a round-robin fashion in the order of the thread number. // - // for (IV in 0..LastIteration) BODY; - // <Final counter/linear vars updates>; + // while(UB = min(UB, GlobalUB), idx = LB, idx < UB) { + // while (idx <= UB) { BODY; ++idx; } // inner loop + // LB = LB + ST; + // UB = UB + ST; + // } // - // Walk clauses and process safelen/lastprivate. - bool SeparateIter = false; - LoopStack.setParallel(); - LoopStack.setVectorizerEnable(true); - for (auto C : S.clauses()) { - switch (C->getClauseKind()) { - case OMPC_safelen: { - RValue Len = EmitAnyExpr(cast<OMPSafelenClause>(C)->getSafelen(), - AggValueSlot::ignored(), true); - llvm::ConstantInt *Val = cast<llvm::ConstantInt>(Len.getScalarVal()); - LoopStack.setVectorizerWidth(Val->getZExtValue()); - // In presence of finite 'safelen', it may be unsafe to mark all - // the memory instructions parallel, because loop-carried - // dependences of 'safelen' iterations are possible. - LoopStack.setParallel(false); - break; - } - case OMPC_aligned: - EmitOMPAlignedClause(*this, CGM, cast<OMPAlignedClause>(*C)); - break; - case OMPC_lastprivate: - SeparateIter = true; - break; - default: - // Not handled yet - ; - } - } + const Expr *IVExpr = S.getIterationVariable(); + const unsigned IVSize = getContext().getTypeSize(IVExpr->getType()); + const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation(); - InlinedOpenMPRegion Region(*this, S.getAssociatedStmt()); - RunCleanupsScope DirectiveScope(*this); + RT.emitForInit( + *this, S.getLocStart(), ScheduleKind, IVSize, IVSigned, Ordered, IL, LB, + (DynamicOrOrdered ? EmitAnyExpr(S.getLastIteration()).getScalarVal() + : UB), + ST, Chunk); - CGDebugInfo *DI = getDebugInfo(); - if (DI) - DI->EmitLexicalBlockStart(Builder, S.getSourceRange().getBegin()); + auto LoopExit = getJumpDestInCurrentScope("omp.dispatch.end"); - // Emit the loop iteration variable. 
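The dispatch loop described in the comment block above, as runtime-level pseudo-C++. This is a sketch: the __kmpc_dispatch_next_4 prototype follows the libomp convention for 32-bit signed induction variables and is declared here only for illustration:

    struct ident_t; // opaque source-location descriptor used by the runtime
    extern "C" int __kmpc_dispatch_next_4(ident_t *Loc, int GTid, int *PLast,
                                          int *PLb, int *PUb, int *PSt);

    void dispatchLoopSketch(ident_t *Loc, int GTid, void (*Body)(int)) {
      int Last, LB, UB, ST;
      // omp.dispatch.cond -> omp.dispatch.body -> omp.dispatch.inc ...
      while (__kmpc_dispatch_next_4(Loc, GTid, &Last, &LB, &UB, &ST)) {
        for (int IV = LB; IV <= UB; ++IV) // the inner loop emitted above
          Body(IV);
      }                                   // omp.dispatch.end
    }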
- const Expr *IVExpr = S.getIterationVariable(); - const VarDecl *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl()); - EmitVarDecl(*IVDecl); - EmitIgnoredExpr(S.getInit()); + // Start the loop with a block that tests the condition. + auto CondBlock = createBasicBlock("omp.dispatch.cond"); + EmitBlock(CondBlock); + LoopStack.push(CondBlock); - // Emit the iterations count variable. - // If it is not a variable, Sema decided to calculate iterations count on each - // iteration (e.g., it is foldable into a constant). - if (auto LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) { - EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl())); - // Emit calculation of the iterations count. - EmitIgnoredExpr(S.getCalcLastIteration()); + llvm::Value *BoolCondVal = nullptr; + if (!DynamicOrOrdered) { + // UB = min(UB, GlobalUB) + EmitIgnoredExpr(S.getEnsureUpperBound()); + // IV = LB + EmitIgnoredExpr(S.getInit()); + // IV < UB + BoolCondVal = EvaluateExprAsBool(S.getCond(false)); + } else { + BoolCondVal = RT.emitForNext(*this, S.getLocStart(), IVSize, IVSigned, + IL, LB, UB, ST); } - if (SeparateIter) { - // Emit: if (LastIteration > 0) - begin. - RegionCounter Cnt = getPGORegionCounter(&S); - auto ThenBlock = createBasicBlock("simd.if.then"); - auto ContBlock = createBasicBlock("simd.if.end"); - EmitBranchOnBoolExpr(S.getPreCond(), ThenBlock, ContBlock, Cnt.getCount()); - EmitBlock(ThenBlock); - Cnt.beginRegion(Builder); - // Emit 'then' code. - { - OMPPrivateScope LoopScope(*this); - EmitPrivateLoopCounters(*this, LoopScope, S.counters()); - EmitOMPInnerLoop(S, LoopScope, /* SeparateIter */ true); - EmitOMPLoopBody(S, /* SeparateIter */ true); - } - EmitOMPSimdFinal(S); - // Emit: if (LastIteration != 0) - end. - EmitBranch(ContBlock); - EmitBlock(ContBlock, true); - } else { - { - OMPPrivateScope LoopScope(*this); - EmitPrivateLoopCounters(*this, LoopScope, S.counters()); - EmitOMPInnerLoop(S, LoopScope); - } - EmitOMPSimdFinal(S); + // If there are any cleanups between here and the loop-exit scope, + // create a block to stage a loop exit along. + auto ExitBlock = LoopExit.getBlock(); + if (LoopScope.requiresCleanups()) + ExitBlock = createBasicBlock("omp.dispatch.cleanup"); + + auto LoopBody = createBasicBlock("omp.dispatch.body"); + Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock); + if (ExitBlock != LoopExit.getBlock()) { + EmitBlock(ExitBlock); + EmitBranchThroughCleanup(LoopExit); + } + EmitBlock(LoopBody); + + // Emit "IV = LB" (in case of static schedule, we have already calculated new + // LB for loop condition and emitted it above). + if (DynamicOrOrdered) + EmitIgnoredExpr(S.getInit()); + + // Create a block for the increment. + auto Continue = getJumpDestInCurrentScope("omp.dispatch.inc"); + BreakContinueStack.push_back(BreakContinue(LoopExit, Continue)); + + SourceLocation Loc = S.getLocStart(); + // Generate !llvm.loop.parallel metadata for loads and stores for loops with + // dynamic/guided scheduling and without ordered clause. 
+ LoopStack.setParallel((ScheduleKind == OMPC_SCHEDULE_dynamic || + ScheduleKind == OMPC_SCHEDULE_guided) && + !Ordered); + EmitOMPInnerLoop( + S, LoopScope.requiresCleanups(), S.getCond(/*SeparateIter=*/false), + S.getInc(), + [&S](CodeGenFunction &CGF) { + CGF.EmitOMPLoopBody(S); + CGF.EmitStopPoint(&S); + }, + [Ordered, IVSize, IVSigned, Loc](CodeGenFunction &CGF) { + if (Ordered) { + CGF.CGM.getOpenMPRuntime().emitForOrderedIterationEnd( + CGF, Loc, IVSize, IVSigned); + } + }); + + EmitBlock(Continue.getBlock()); + BreakContinueStack.pop_back(); + if (!DynamicOrOrdered) { + // Emit "LB = LB + Stride", "UB = UB + Stride". + EmitIgnoredExpr(S.getNextLowerBound()); + EmitIgnoredExpr(S.getNextUpperBound()); } - if (DI) - DI->EmitLexicalBlockEnd(Builder, S.getSourceRange().getEnd()); + EmitBranch(CondBlock); + LoopStack.pop(); + // Emit the fall-through block. + EmitBlock(LoopExit.getBlock()); + + // Tell the runtime we are done. + if (!DynamicOrOrdered) + RT.emitForStaticFinish(*this, S.getLocEnd()); } /// \brief Emit a helper variable and return corresponding lvalue. @@ -508,7 +987,39 @@ static LValue EmitOMPHelperVar(CodeGenFunction &CGF, return CGF.EmitLValue(Helper); } -void CodeGenFunction::EmitOMPWorksharingLoop(const OMPLoopDirective &S) { +static std::pair<llvm::Value * /*Chunk*/, OpenMPScheduleClauseKind> +emitScheduleClause(CodeGenFunction &CGF, const OMPLoopDirective &S, + bool OuterRegion) { + // Detect the loop schedule kind and chunk. + auto ScheduleKind = OMPC_SCHEDULE_unknown; + llvm::Value *Chunk = nullptr; + if (auto *C = + cast_or_null<OMPScheduleClause>(S.getSingleClause(OMPC_schedule))) { + ScheduleKind = C->getScheduleKind(); + if (const auto *Ch = C->getChunkSize()) { + if (auto *ImpRef = cast_or_null<DeclRefExpr>(C->getHelperChunkSize())) { + if (OuterRegion) { + const VarDecl *ImpVar = cast<VarDecl>(ImpRef->getDecl()); + CGF.EmitVarDecl(*ImpVar); + CGF.EmitStoreThroughLValue( + CGF.EmitAnyExpr(Ch), + CGF.MakeNaturalAlignAddrLValue(CGF.GetAddrOfLocalVar(ImpVar), + ImpVar->getType())); + } else { + Ch = ImpRef; + } + } + if (!C->getHelperChunkSize() || !OuterRegion) { + Chunk = CGF.EmitScalarExpr(Ch); + Chunk = CGF.EmitScalarConversion(Chunk, Ch->getType(), + S.getIterationVariable()->getType()); + } + } + } + return std::make_pair(Chunk, ScheduleKind); +} + +bool CodeGenFunction::EmitOMPWorksharingLoop(const OMPLoopDirective &S) { // Emit the loop iteration variable. auto IVExpr = cast<DeclRefExpr>(S.getIterationVariable()); auto IVDecl = cast<VarDecl>(IVExpr->getDecl()); @@ -525,15 +1036,25 @@ void CodeGenFunction::EmitOMPWorksharingLoop(const OMPLoopDirective &S) { auto &RT = CGM.getOpenMPRuntime(); + bool HasLastprivateClause; // Check pre-condition. { // Skip the entire loop if we don't meet the precondition. - RegionCounter Cnt = getPGORegionCounter(&S); - auto ThenBlock = createBasicBlock("omp.precond.then"); - auto ContBlock = createBasicBlock("omp.precond.end"); - EmitBranchOnBoolExpr(S.getPreCond(), ThenBlock, ContBlock, Cnt.getCount()); - EmitBlock(ThenBlock); - Cnt.beginRegion(Builder); + // If the condition constant folds and can be elided, avoid emitting the + // whole loop. 
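For the static chunked schedule the same outer loop advances the bounds by the stride instead of calling the dispatcher; a single-thread view (illustrative):

    #include <algorithm>

    void staticChunkedSketch(int N, int Chunk, int Tid, int NThreads,
                             void (*Body)(int)) {
      int Stride = NThreads * Chunk; // the NextLowerBound/NextUpperBound step
      for (int LB = Tid * Chunk; LB < N; LB += Stride) {
        int UB = std::min(LB + Chunk - 1, N - 1); // EnsureUpperBound
        for (int IV = LB; IV <= UB; ++IV)         // inner loop
          Body(IV);
      }
    }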
+ bool CondConstant; + llvm::BasicBlock *ContBlock = nullptr; + if (ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) { + if (!CondConstant) + return false; + } else { + auto *ThenBlock = createBasicBlock("omp.precond.then"); + ContBlock = createBasicBlock("omp.precond.end"); + emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock, + getProfileCount(&S)); + EmitBlock(ThenBlock); + incrementProfileCounter(&S); + } // Emit 'then' code. { // Emit helper vars inits. @@ -547,105 +1068,342 @@ void CodeGenFunction::EmitOMPWorksharingLoop(const OMPLoopDirective &S) { EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable())); OMPPrivateScope LoopScope(*this); + if (EmitOMPFirstprivateClause(S, LoopScope)) { + // Emit implicit barrier to synchronize threads and avoid data races on + // initialization of firstprivate variables. + CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), + OMPD_unknown); + } + EmitOMPPrivateClause(S, LoopScope); + HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope); + EmitOMPReductionClauseInit(S, LoopScope); EmitPrivateLoopCounters(*this, LoopScope, S.counters()); + (void)LoopScope.Privatize(); // Detect the loop schedule kind and chunk. - auto ScheduleKind = OMPC_SCHEDULE_unknown; - llvm::Value *Chunk = nullptr; - if (auto C = cast_or_null<OMPScheduleClause>( - S.getSingleClause(OMPC_schedule))) { - ScheduleKind = C->getScheduleKind(); - if (auto Ch = C->getChunkSize()) { - Chunk = EmitScalarExpr(Ch); - Chunk = EmitScalarConversion(Chunk, Ch->getType(), - S.getIterationVariable()->getType()); - } - } + llvm::Value *Chunk; + OpenMPScheduleClauseKind ScheduleKind; + auto ScheduleInfo = + emitScheduleClause(*this, S, /*OuterRegion=*/false); + Chunk = ScheduleInfo.first; + ScheduleKind = ScheduleInfo.second; const unsigned IVSize = getContext().getTypeSize(IVExpr->getType()); const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation(); + const bool Ordered = S.getSingleClause(OMPC_ordered) != nullptr; if (RT.isStaticNonchunked(ScheduleKind, - /* Chunked */ Chunk != nullptr)) { + /* Chunked */ Chunk != nullptr) && + !Ordered) { // OpenMP [2.7.1, Loop Construct, Description, table 2-1] // When no chunk_size is specified, the iteration space is divided into // chunks that are approximately equal in size, and at most one chunk is // distributed to each thread. Note that the size of the chunks is // unspecified in this case. - RT.EmitOMPForInit(*this, S.getLocStart(), ScheduleKind, IVSize, IVSigned, - IL.getAddress(), LB.getAddress(), UB.getAddress(), - ST.getAddress()); + RT.emitForInit(*this, S.getLocStart(), ScheduleKind, IVSize, IVSigned, + Ordered, IL.getAddress(), LB.getAddress(), + UB.getAddress(), ST.getAddress()); // UB = min(UB, GlobalUB); EmitIgnoredExpr(S.getEnsureUpperBound()); // IV = LB; EmitIgnoredExpr(S.getInit()); // while (idx <= UB) { BODY; ++idx; } - EmitOMPInnerLoop(S, LoopScope); + EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), + S.getCond(/*SeparateIter=*/false), S.getInc(), + [&S](CodeGenFunction &CGF) { + CGF.EmitOMPLoopBody(S); + CGF.EmitStopPoint(&S); + }, + [](CodeGenFunction &) {}); // Tell the runtime we are done. - RT.EmitOMPForFinish(*this, S.getLocStart(), ScheduleKind); - } else - ErrorUnsupported(&S, "OpenMP loop with requested schedule"); + RT.emitForStaticFinish(*this, S.getLocStart()); + } else { + // Emit the outer loop, which requests its work chunk [LB..UB] from + // runtime and runs the inner loop to process it. 
+ EmitOMPForOuterLoop(ScheduleKind, S, LoopScope, Ordered, + LB.getAddress(), UB.getAddress(), ST.getAddress(), + IL.getAddress(), Chunk); + } + EmitOMPReductionClauseFinal(S); + // Emit final copy of the lastprivate variables if IsLastIter != 0. + if (HasLastprivateClause) + EmitOMPLastprivateClauseFinal( + S, Builder.CreateIsNotNull(EmitLoadOfScalar(IL, S.getLocStart()))); } // We're now done with the loop, so jump to the continuation block. - EmitBranch(ContBlock); - EmitBlock(ContBlock, true); + if (ContBlock) { + EmitBranch(ContBlock); + EmitBlock(ContBlock, true); + } } + return HasLastprivateClause; } void CodeGenFunction::EmitOMPForDirective(const OMPForDirective &S) { - InlinedOpenMPRegion Region(*this, S.getAssociatedStmt()); - RunCleanupsScope DirectiveScope(*this); - - CGDebugInfo *DI = getDebugInfo(); - if (DI) - DI->EmitLexicalBlockStart(Builder, S.getSourceRange().getBegin()); - - EmitOMPWorksharingLoop(S); + LexicalScope Scope(*this, S.getSourceRange()); + bool HasLastprivates = false; + auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF) { + HasLastprivates = CGF.EmitOMPWorksharingLoop(S); + }; + CGM.getOpenMPRuntime().emitInlinedDirective(*this, CodeGen); // Emit an implicit barrier at the end. - CGM.getOpenMPRuntime().EmitOMPBarrierCall(*this, S.getLocStart(), - /*IsExplicit*/ false); - if (DI) - DI->EmitLexicalBlockEnd(Builder, S.getSourceRange().getEnd()); + if (!S.getSingleClause(OMPC_nowait) || HasLastprivates) { + CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_for); + } } void CodeGenFunction::EmitOMPForSimdDirective(const OMPForSimdDirective &) { llvm_unreachable("CodeGen for 'omp for simd' is not supported yet."); } -void CodeGenFunction::EmitOMPSectionsDirective(const OMPSectionsDirective &) { - llvm_unreachable("CodeGen for 'omp sections' is not supported yet."); +static LValue createSectionLVal(CodeGenFunction &CGF, QualType Ty, + const Twine &Name, + llvm::Value *Init = nullptr) { + auto LVal = CGF.MakeNaturalAlignAddrLValue(CGF.CreateMemTemp(Ty, Name), Ty); + if (Init) + CGF.EmitScalarInit(Init, LVal); + return LVal; } -void CodeGenFunction::EmitOMPSectionDirective(const OMPSectionDirective &) { - llvm_unreachable("CodeGen for 'omp section' is not supported yet."); +static OpenMPDirectiveKind emitSections(CodeGenFunction &CGF, + const OMPExecutableDirective &S) { + auto *Stmt = cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt(); + auto *CS = dyn_cast<CompoundStmt>(Stmt); + if (CS && CS->size() > 1) { + bool HasLastprivates = false; + auto &&CodeGen = [&S, CS, &HasLastprivates](CodeGenFunction &CGF) { + auto &C = CGF.CGM.getContext(); + auto KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1); + // Emit helper vars inits. + LValue LB = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.lb.", + CGF.Builder.getInt32(0)); + auto *GlobalUBVal = CGF.Builder.getInt32(CS->size() - 1); + LValue UB = + createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.ub.", GlobalUBVal); + LValue ST = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.st.", + CGF.Builder.getInt32(1)); + LValue IL = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.il.", + CGF.Builder.getInt32(0)); + // Loop counter. 
+ LValue IV = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.iv."); + OpaqueValueExpr IVRefExpr(S.getLocStart(), KmpInt32Ty, VK_LValue); + CodeGenFunction::OpaqueValueMapping OpaqueIV(CGF, &IVRefExpr, IV); + OpaqueValueExpr UBRefExpr(S.getLocStart(), KmpInt32Ty, VK_LValue); + CodeGenFunction::OpaqueValueMapping OpaqueUB(CGF, &UBRefExpr, UB); + // Generate condition for loop. + BinaryOperator Cond(&IVRefExpr, &UBRefExpr, BO_LE, C.BoolTy, VK_RValue, + OK_Ordinary, S.getLocStart(), + /*fpContractable=*/false); + // Increment for loop counter. + UnaryOperator Inc(&IVRefExpr, UO_PreInc, KmpInt32Ty, VK_RValue, + OK_Ordinary, S.getLocStart()); + auto BodyGen = [CS, &S, &IV](CodeGenFunction &CGF) { + // Iterate through all sections and emit a switch construct: + // switch (IV) { + // case 0: + // <SectionStmt[0]>; + // break; + // ... + // case <NumSection> - 1: + // <SectionStmt[<NumSection> - 1]>; + // break; + // } + // .omp.sections.exit: + auto *ExitBB = CGF.createBasicBlock(".omp.sections.exit"); + auto *SwitchStmt = CGF.Builder.CreateSwitch( + CGF.EmitLoadOfLValue(IV, S.getLocStart()).getScalarVal(), ExitBB, + CS->size()); + unsigned CaseNumber = 0; + for (auto C = CS->children(); C; ++C, ++CaseNumber) { + auto CaseBB = CGF.createBasicBlock(".omp.sections.case"); + CGF.EmitBlock(CaseBB); + SwitchStmt->addCase(CGF.Builder.getInt32(CaseNumber), CaseBB); + CGF.EmitStmt(*C); + CGF.EmitBranch(ExitBB); + } + CGF.EmitBlock(ExitBB, /*IsFinished=*/true); + }; + + CodeGenFunction::OMPPrivateScope LoopScope(CGF); + if (CGF.EmitOMPFirstprivateClause(S, LoopScope)) { + // Emit implicit barrier to synchronize threads and avoid data races on + // initialization of firstprivate variables. + CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(), + OMPD_unknown); + } + CGF.EmitOMPPrivateClause(S, LoopScope); + HasLastprivates = CGF.EmitOMPLastprivateClauseInit(S, LoopScope); + CGF.EmitOMPReductionClauseInit(S, LoopScope); + (void)LoopScope.Privatize(); + + // Emit static non-chunked loop. + CGF.CGM.getOpenMPRuntime().emitForInit( + CGF, S.getLocStart(), OMPC_SCHEDULE_static, /*IVSize=*/32, + /*IVSigned=*/true, /*Ordered=*/false, IL.getAddress(), + LB.getAddress(), UB.getAddress(), ST.getAddress()); + // UB = min(UB, GlobalUB); + auto *UBVal = CGF.EmitLoadOfScalar(UB, S.getLocStart()); + auto *MinUBGlobalUB = CGF.Builder.CreateSelect( + CGF.Builder.CreateICmpSLT(UBVal, GlobalUBVal), UBVal, GlobalUBVal); + CGF.EmitStoreOfScalar(MinUBGlobalUB, UB); + // IV = LB; + CGF.EmitStoreOfScalar(CGF.EmitLoadOfScalar(LB, S.getLocStart()), IV); + // while (idx <= UB) { BODY; ++idx; } + CGF.EmitOMPInnerLoop(S, /*RequiresCleanup=*/false, &Cond, &Inc, BodyGen, + [](CodeGenFunction &) {}); + // Tell the runtime we are done. + CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getLocStart()); + CGF.EmitOMPReductionClauseFinal(S); + + // Emit final copy of the lastprivate variables if IsLastIter != 0. + if (HasLastprivates) + CGF.EmitOMPLastprivateClauseFinal( + S, CGF.Builder.CreateIsNotNull( + CGF.EmitLoadOfScalar(IL, S.getLocStart()))); + }; + + CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, CodeGen); + // Emit barrier for lastprivates only if 'sections' directive has 'nowait' + // clause. Otherwise the barrier will be generated by the codegen for the + // directive. + if (HasLastprivates && S.getSingleClause(OMPC_nowait)) { + // Emit implicit barrier to synchronize threads and avoid data races on + // initialization of firstprivate variables. 
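At the source level, the BodyGen above turns a sections construct into a statically scheduled loop over section indices (illustrative; the real emission builds the switch directly in IR):

    void sectionsSketch(int LB, int UB) {
      for (int IV = LB; IV <= UB; ++IV) {
        switch (IV) {      // .omp.sections.case blocks
        case 0: /* SectionStmt[0] */ break;
        case 1: /* SectionStmt[1] */ break;
        default: break;    // .omp.sections.exit
        }
      }
    }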
+ CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(), + OMPD_unknown); + } + return OMPD_sections; + } + // If only one section is found - no need to generate loop, emit as a single + // region. + bool HasFirstprivates; + // No need to generate reductions for sections with single section region, we + // can use original shared variables for all operations. + bool HasReductions = !S.getClausesOfKind(OMPC_reduction).empty(); + // No need to generate lastprivates for sections with single section region, + // we can use original shared variable for all calculations with barrier at + // the end of the sections. + bool HasLastprivates = !S.getClausesOfKind(OMPC_lastprivate).empty(); + auto &&CodeGen = [Stmt, &S, &HasFirstprivates](CodeGenFunction &CGF) { + CodeGenFunction::OMPPrivateScope SingleScope(CGF); + HasFirstprivates = CGF.EmitOMPFirstprivateClause(S, SingleScope); + CGF.EmitOMPPrivateClause(S, SingleScope); + (void)SingleScope.Privatize(); + + CGF.EmitStmt(Stmt); + CGF.EnsureInsertPoint(); + }; + CGF.CGM.getOpenMPRuntime().emitSingleRegion(CGF, CodeGen, S.getLocStart(), + llvm::None, llvm::None, + llvm::None, llvm::None); + // Emit barrier for firstprivates, lastprivates or reductions only if + // 'sections' directive has 'nowait' clause. Otherwise the barrier will be + // generated by the codegen for the directive. + if ((HasFirstprivates || HasLastprivates || HasReductions) && + S.getSingleClause(OMPC_nowait)) { + // Emit implicit barrier to synchronize threads and avoid data races on + // initialization of firstprivate variables. + CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(), + OMPD_unknown); + } + return OMPD_single; +} + +void CodeGenFunction::EmitOMPSectionsDirective(const OMPSectionsDirective &S) { + LexicalScope Scope(*this, S.getSourceRange()); + OpenMPDirectiveKind EmittedAs = emitSections(*this, S); + // Emit an implicit barrier at the end. + if (!S.getSingleClause(OMPC_nowait)) { + CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), EmittedAs); + } } -void CodeGenFunction::EmitOMPSingleDirective(const OMPSingleDirective &) { - llvm_unreachable("CodeGen for 'omp single' is not supported yet."); +void CodeGenFunction::EmitOMPSectionDirective(const OMPSectionDirective &S) { + LexicalScope Scope(*this, S.getSourceRange()); + auto &&CodeGen = [&S](CodeGenFunction &CGF) { + CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt()); + CGF.EnsureInsertPoint(); + }; + CGM.getOpenMPRuntime().emitInlinedDirective(*this, CodeGen); +} + +void CodeGenFunction::EmitOMPSingleDirective(const OMPSingleDirective &S) { + llvm::SmallVector<const Expr *, 8> CopyprivateVars; + llvm::SmallVector<const Expr *, 8> DestExprs; + llvm::SmallVector<const Expr *, 8> SrcExprs; + llvm::SmallVector<const Expr *, 8> AssignmentOps; + // Check if there are any 'copyprivate' clauses associated with this + // 'single' + // construct. 
+ // Build a list of copyprivate variables along with helper expressions + // (<source>, <destination>, <destination>=<source> expressions) + for (auto &&I = S.getClausesOfKind(OMPC_copyprivate); I; ++I) { + auto *C = cast<OMPCopyprivateClause>(*I); + CopyprivateVars.append(C->varlists().begin(), C->varlists().end()); + DestExprs.append(C->destination_exprs().begin(), + C->destination_exprs().end()); + SrcExprs.append(C->source_exprs().begin(), C->source_exprs().end()); + AssignmentOps.append(C->assignment_ops().begin(), + C->assignment_ops().end()); + } + LexicalScope Scope(*this, S.getSourceRange()); + // Emit code for 'single' region along with 'copyprivate' clauses + bool HasFirstprivates; + auto &&CodeGen = [&S, &HasFirstprivates](CodeGenFunction &CGF) { + CodeGenFunction::OMPPrivateScope SingleScope(CGF); + HasFirstprivates = CGF.EmitOMPFirstprivateClause(S, SingleScope); + CGF.EmitOMPPrivateClause(S, SingleScope); + (void)SingleScope.Privatize(); + + CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt()); + CGF.EnsureInsertPoint(); + }; + CGM.getOpenMPRuntime().emitSingleRegion(*this, CodeGen, S.getLocStart(), + CopyprivateVars, DestExprs, SrcExprs, + AssignmentOps); + // Emit an implicit barrier at the end (to avoid data race on firstprivate + // init or if no 'nowait' clause was specified and no 'copyprivate' clause). + if ((!S.getSingleClause(OMPC_nowait) || HasFirstprivates) && + CopyprivateVars.empty()) { + CGM.getOpenMPRuntime().emitBarrierCall( + *this, S.getLocStart(), + S.getSingleClause(OMPC_nowait) ? OMPD_unknown : OMPD_single); + } } void CodeGenFunction::EmitOMPMasterDirective(const OMPMasterDirective &S) { - CGM.getOpenMPRuntime().EmitOMPMasterRegion(*this, [&]() -> void { - InlinedOpenMPRegion Region(*this, S.getAssociatedStmt()); - RunCleanupsScope Scope(*this); - EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt()); - EnsureInsertPoint(); - }, S.getLocStart()); + LexicalScope Scope(*this, S.getSourceRange()); + auto &&CodeGen = [&S](CodeGenFunction &CGF) { + CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt()); + CGF.EnsureInsertPoint(); + }; + CGM.getOpenMPRuntime().emitMasterRegion(*this, CodeGen, S.getLocStart()); } void CodeGenFunction::EmitOMPCriticalDirective(const OMPCriticalDirective &S) { - CGM.getOpenMPRuntime().EmitOMPCriticalRegion( - *this, S.getDirectiveName().getAsString(), [&]() -> void { - InlinedOpenMPRegion Region(*this, S.getAssociatedStmt()); - RunCleanupsScope Scope(*this); - EmitStmt( - cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt()); - EnsureInsertPoint(); - }, S.getLocStart()); + LexicalScope Scope(*this, S.getSourceRange()); + auto &&CodeGen = [&S](CodeGenFunction &CGF) { + CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt()); + CGF.EnsureInsertPoint(); + }; + CGM.getOpenMPRuntime().emitCriticalRegion( + *this, S.getDirectiveName().getAsString(), CodeGen, S.getLocStart()); } -void -CodeGenFunction::EmitOMPParallelForDirective(const OMPParallelForDirective &) { - llvm_unreachable("CodeGen for 'omp parallel for' is not supported yet."); +void CodeGenFunction::EmitOMPParallelForDirective( + const OMPParallelForDirective &S) { + // Emit directive as a combined directive that consists of two implicit + // directives: 'parallel' with 'for' directive. 
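For reference, the single/copyprivate semantics implemented above (illustrative):

    #include <omp.h>
    #include <cstdio>

    void singleDemo() {
      #pragma omp parallel
      {
        int Token = 0; // private to each thread
        // One thread executes the block; copyprivate then broadcasts its
        // Token into every other thread's Token, via the gathered
        // CopyprivateVars/AssignmentOps lists.
        #pragma omp single copyprivate(Token)
        Token = 42;
        std::printf("%d\n", Token); // every thread prints 42
      }
    }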
+ LexicalScope Scope(*this, S.getSourceRange()); + (void)emitScheduleClause(*this, S, /*OuterRegion=*/true); + auto &&CodeGen = [&S](CodeGenFunction &CGF) { + CGF.EmitOMPWorksharingLoop(S); + // Emit implicit barrier at the end of parallel region, but this barrier + // is at the end of 'for' directive, so emit it as the implicit barrier for + // this 'for' directive. + CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(), + OMPD_parallel); + }; + emitCommonOMPParallelDirective(*this, S, CodeGen); } void CodeGenFunction::EmitOMPParallelForSimdDirective( @@ -654,45 +1412,600 @@ void CodeGenFunction::EmitOMPParallelForSimdDirective( } void CodeGenFunction::EmitOMPParallelSectionsDirective( - const OMPParallelSectionsDirective &) { - llvm_unreachable("CodeGen for 'omp parallel sections' is not supported yet."); + const OMPParallelSectionsDirective &S) { + // Emit directive as a combined directive that consists of two implicit + // directives: 'parallel' with 'sections' directive. + LexicalScope Scope(*this, S.getSourceRange()); + auto &&CodeGen = [&S](CodeGenFunction &CGF) { + (void)emitSections(CGF, S); + // Emit implicit barrier at the end of parallel region. + CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getLocStart(), + OMPD_parallel); + }; + emitCommonOMPParallelDirective(*this, S, CodeGen); } -void CodeGenFunction::EmitOMPTaskDirective(const OMPTaskDirective &) { - llvm_unreachable("CodeGen for 'omp task' is not supported yet."); +void CodeGenFunction::EmitOMPTaskDirective(const OMPTaskDirective &S) { + // Emit outlined function for task construct. + LexicalScope Scope(*this, S.getSourceRange()); + auto CS = cast<CapturedStmt>(S.getAssociatedStmt()); + auto CapturedStruct = GenerateCapturedStmtArgument(*CS); + auto *I = CS->getCapturedDecl()->param_begin(); + auto *PartId = std::next(I); + // The first function argument for tasks is a thread id, the second one is a + // part id (0 for tied tasks, >=0 for untied task). + llvm::DenseSet<const VarDecl *> EmittedAsPrivate; + // Get list of private variables. + llvm::SmallVector<const Expr *, 8> PrivateVars; + llvm::SmallVector<const Expr *, 8> PrivateCopies; + for (auto &&I = S.getClausesOfKind(OMPC_private); I; ++I) { + auto *C = cast<OMPPrivateClause>(*I); + auto IRef = C->varlist_begin(); + for (auto *IInit : C->private_copies()) { + auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl()); + if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) { + PrivateVars.push_back(*IRef); + PrivateCopies.push_back(IInit); + } + ++IRef; + } + } + EmittedAsPrivate.clear(); + // Get list of firstprivate variables. + llvm::SmallVector<const Expr *, 8> FirstprivateVars; + llvm::SmallVector<const Expr *, 8> FirstprivateCopies; + llvm::SmallVector<const Expr *, 8> FirstprivateInits; + for (auto &&I = S.getClausesOfKind(OMPC_firstprivate); I; ++I) { + auto *C = cast<OMPFirstprivateClause>(*I); + auto IRef = C->varlist_begin(); + auto IElemInitRef = C->inits().begin(); + for (auto *IInit : C->private_copies()) { + auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl()); + if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) { + FirstprivateVars.push_back(*IRef); + FirstprivateCopies.push_back(IInit); + FirstprivateInits.push_back(*IElemInitRef); + } + ++IRef, ++IElemInitRef; + } + } + auto &&CodeGen = [PartId, &S, &PrivateVars, &FirstprivateVars]( + CodeGenFunction &CGF) { + // Set proper addresses for generated private copies. 
+ auto *CS = cast<CapturedStmt>(S.getAssociatedStmt()); + OMPPrivateScope Scope(CGF); + if (!PrivateVars.empty() || !FirstprivateVars.empty()) { + auto *CopyFn = CGF.Builder.CreateAlignedLoad( + CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(3)), + CGF.PointerAlignInBytes); + auto *PrivatesPtr = CGF.Builder.CreateAlignedLoad( + CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(2)), + CGF.PointerAlignInBytes); + // Map privates. + llvm::SmallVector<std::pair<const VarDecl *, llvm::Value *>, 16> + PrivatePtrs; + llvm::SmallVector<llvm::Value *, 16> CallArgs; + CallArgs.push_back(PrivatesPtr); + for (auto *E : PrivateVars) { + auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); + auto *PrivatePtr = + CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType())); + PrivatePtrs.push_back(std::make_pair(VD, PrivatePtr)); + CallArgs.push_back(PrivatePtr); + } + for (auto *E : FirstprivateVars) { + auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); + auto *PrivatePtr = + CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType())); + PrivatePtrs.push_back(std::make_pair(VD, PrivatePtr)); + CallArgs.push_back(PrivatePtr); + } + CGF.EmitRuntimeCall(CopyFn, CallArgs); + for (auto &&Pair : PrivatePtrs) { + auto *Replacement = + CGF.Builder.CreateAlignedLoad(Pair.second, CGF.PointerAlignInBytes); + Scope.addPrivate(Pair.first, [Replacement]() { return Replacement; }); + } + } + (void)Scope.Privatize(); + if (*PartId) { + // TODO: emit code for untied tasks. + } + CGF.EmitStmt(CS->getCapturedStmt()); + }; + auto OutlinedFn = + CGM.getOpenMPRuntime().emitTaskOutlinedFunction(S, *I, CodeGen); + // Check if we should emit tied or untied task. + bool Tied = !S.getSingleClause(OMPC_untied); + // Check if the task is final + llvm::PointerIntPair<llvm::Value *, 1, bool> Final; + if (auto *Clause = S.getSingleClause(OMPC_final)) { + // If the condition constant folds and can be elided, try to avoid emitting + // the condition and the dead arm of the if/else. + auto *Cond = cast<OMPFinalClause>(Clause)->getCondition(); + bool CondConstant; + if (ConstantFoldsToSimpleInteger(Cond, CondConstant)) + Final.setInt(CondConstant); + else + Final.setPointer(EvaluateExprAsBool(Cond)); + } else { + // By default the task is not final. 
+ Final.setInt(/*IntVal=*/false); + } + auto SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl()); + const Expr *IfCond = nullptr; + if (auto C = S.getSingleClause(OMPC_if)) { + IfCond = cast<OMPIfClause>(C)->getCondition(); + } + CGM.getOpenMPRuntime().emitTaskCall( + *this, S.getLocStart(), S, Tied, Final, OutlinedFn, SharedsTy, + CapturedStruct, IfCond, PrivateVars, PrivateCopies, FirstprivateVars, + FirstprivateCopies, FirstprivateInits); } -void CodeGenFunction::EmitOMPTaskyieldDirective(const OMPTaskyieldDirective &) { - llvm_unreachable("CodeGen for 'omp taskyield' is not supported yet."); +void CodeGenFunction::EmitOMPTaskyieldDirective( + const OMPTaskyieldDirective &S) { + CGM.getOpenMPRuntime().emitTaskyieldCall(*this, S.getLocStart()); } void CodeGenFunction::EmitOMPBarrierDirective(const OMPBarrierDirective &S) { - CGM.getOpenMPRuntime().EmitOMPBarrierCall(*this, S.getLocStart()); + CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getLocStart(), OMPD_barrier); } -void CodeGenFunction::EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &) { - llvm_unreachable("CodeGen for 'omp taskwait' is not supported yet."); +void CodeGenFunction::EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S) { + CGM.getOpenMPRuntime().emitTaskwaitCall(*this, S.getLocStart()); } void CodeGenFunction::EmitOMPFlushDirective(const OMPFlushDirective &S) { - CGM.getOpenMPRuntime().EmitOMPFlush( - *this, [&]() -> ArrayRef<const Expr *> { - if (auto C = S.getSingleClause(/*K*/ OMPC_flush)) { - auto FlushClause = cast<OMPFlushClause>(C); - return llvm::makeArrayRef(FlushClause->varlist_begin(), - FlushClause->varlist_end()); - } - return llvm::None; - }(), - S.getLocStart()); + CGM.getOpenMPRuntime().emitFlush(*this, [&]() -> ArrayRef<const Expr *> { + if (auto C = S.getSingleClause(/*K*/ OMPC_flush)) { + auto FlushClause = cast<OMPFlushClause>(C); + return llvm::makeArrayRef(FlushClause->varlist_begin(), + FlushClause->varlist_end()); + } + return llvm::None; + }(), S.getLocStart()); +} + +void CodeGenFunction::EmitOMPOrderedDirective(const OMPOrderedDirective &S) { + LexicalScope Scope(*this, S.getSourceRange()); + auto &&CodeGen = [&S](CodeGenFunction &CGF) { + CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt()); + CGF.EnsureInsertPoint(); + }; + CGM.getOpenMPRuntime().emitOrderedRegion(*this, CodeGen, S.getLocStart()); +} + +static llvm::Value *convertToScalarValue(CodeGenFunction &CGF, RValue Val, + QualType SrcType, QualType DestType) { + assert(CGF.hasScalarEvaluationKind(DestType) && + "DestType must have scalar evaluation kind."); + assert(!Val.isAggregate() && "Must be a scalar or complex."); + return Val.isScalar() + ? CGF.EmitScalarConversion(Val.getScalarVal(), SrcType, DestType) + : CGF.EmitComplexToScalarConversion(Val.getComplexVal(), SrcType, + DestType); +} + +static CodeGenFunction::ComplexPairTy +convertToComplexValue(CodeGenFunction &CGF, RValue Val, QualType SrcType, + QualType DestType) { + assert(CGF.getEvaluationKind(DestType) == TEK_Complex && + "DestType must have complex evaluation kind."); + CodeGenFunction::ComplexPairTy ComplexVal; + if (Val.isScalar()) { + // Convert the input element to the element type of the complex. 
+ auto DestElementType = DestType->castAs<ComplexType>()->getElementType(); + auto ScalarVal = + CGF.EmitScalarConversion(Val.getScalarVal(), SrcType, DestElementType); + ComplexVal = CodeGenFunction::ComplexPairTy( + ScalarVal, llvm::Constant::getNullValue(ScalarVal->getType())); + } else { + assert(Val.isComplex() && "Must be a scalar or complex."); + auto SrcElementType = SrcType->castAs<ComplexType>()->getElementType(); + auto DestElementType = DestType->castAs<ComplexType>()->getElementType(); + ComplexVal.first = CGF.EmitScalarConversion( + Val.getComplexVal().first, SrcElementType, DestElementType); + ComplexVal.second = CGF.EmitScalarConversion( + Val.getComplexVal().second, SrcElementType, DestElementType); + } + return ComplexVal; +} + +static void emitSimpleAtomicStore(CodeGenFunction &CGF, bool IsSeqCst, + LValue LVal, RValue RVal) { + if (LVal.isGlobalReg()) { + CGF.EmitStoreThroughGlobalRegLValue(RVal, LVal); + } else { + CGF.EmitAtomicStore(RVal, LVal, IsSeqCst ? llvm::SequentiallyConsistent + : llvm::Monotonic, + LVal.isVolatile(), /*IsInit=*/false); + } +} + +static void emitSimpleStore(CodeGenFunction &CGF, LValue LVal, RValue RVal, + QualType RValTy) { + switch (CGF.getEvaluationKind(LVal.getType())) { + case TEK_Scalar: + CGF.EmitStoreThroughLValue( + RValue::get(convertToScalarValue(CGF, RVal, RValTy, LVal.getType())), + LVal); + break; + case TEK_Complex: + CGF.EmitStoreOfComplex( + convertToComplexValue(CGF, RVal, RValTy, LVal.getType()), LVal, + /*isInit=*/false); + break; + case TEK_Aggregate: + llvm_unreachable("Must be a scalar or complex."); + } } -void CodeGenFunction::EmitOMPOrderedDirective(const OMPOrderedDirective &) { - llvm_unreachable("CodeGen for 'omp ordered' is not supported yet."); +static void EmitOMPAtomicReadExpr(CodeGenFunction &CGF, bool IsSeqCst, + const Expr *X, const Expr *V, + SourceLocation Loc) { + // v = x; + assert(V->isLValue() && "V of 'omp atomic read' is not lvalue"); + assert(X->isLValue() && "X of 'omp atomic read' is not lvalue"); + LValue XLValue = CGF.EmitLValue(X); + LValue VLValue = CGF.EmitLValue(V); + RValue Res = XLValue.isGlobalReg() + ? CGF.EmitLoadOfLValue(XLValue, Loc) + : CGF.EmitAtomicLoad(XLValue, Loc, + IsSeqCst ? llvm::SequentiallyConsistent + : llvm::Monotonic, + XLValue.isVolatile()); + // OpenMP, 2.12.6, atomic Construct + // Any atomic construct with a seq_cst clause forces the atomically + // performed operation to include an implicit flush operation without a + // list. + if (IsSeqCst) + CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc); + emitSimpleStore(CGF,VLValue, Res, X->getType().getNonReferenceType()); } -void CodeGenFunction::EmitOMPAtomicDirective(const OMPAtomicDirective &) { - llvm_unreachable("CodeGen for 'omp atomic' is not supported yet."); +static void EmitOMPAtomicWriteExpr(CodeGenFunction &CGF, bool IsSeqCst, + const Expr *X, const Expr *E, + SourceLocation Loc) { + // x = expr; + assert(X->isLValue() && "X of 'omp atomic write' is not lvalue"); + emitSimpleAtomicStore(CGF, IsSeqCst, CGF.EmitLValue(X), CGF.EmitAnyExpr(E)); + // OpenMP, 2.12.6, atomic Construct + // Any atomic construct with a seq_cst clause forces the atomically + // performed operation to include an implicit flush operation without a + // list. 
+ if (IsSeqCst) + CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc); +} + +static std::pair<bool, RValue> emitOMPAtomicRMW(CodeGenFunction &CGF, LValue X, + RValue Update, + BinaryOperatorKind BO, + llvm::AtomicOrdering AO, + bool IsXLHSInRHSPart) { + auto &Context = CGF.CGM.getContext(); + // Allow atomicrmw only if 'x' and 'update' are integer values, lvalue for 'x' + // expression is simple and atomic is allowed for the given type for the + // target platform. + if (BO == BO_Comma || !Update.isScalar() || + !Update.getScalarVal()->getType()->isIntegerTy() || + !X.isSimple() || (!isa<llvm::ConstantInt>(Update.getScalarVal()) && + (Update.getScalarVal()->getType() != + X.getAddress()->getType()->getPointerElementType())) || + !X.getAddress()->getType()->getPointerElementType()->isIntegerTy() || + !Context.getTargetInfo().hasBuiltinAtomic( + Context.getTypeSize(X.getType()), Context.toBits(X.getAlignment()))) + return std::make_pair(false, RValue::get(nullptr)); + + llvm::AtomicRMWInst::BinOp RMWOp; + switch (BO) { + case BO_Add: + RMWOp = llvm::AtomicRMWInst::Add; + break; + case BO_Sub: + if (!IsXLHSInRHSPart) + return std::make_pair(false, RValue::get(nullptr)); + RMWOp = llvm::AtomicRMWInst::Sub; + break; + case BO_And: + RMWOp = llvm::AtomicRMWInst::And; + break; + case BO_Or: + RMWOp = llvm::AtomicRMWInst::Or; + break; + case BO_Xor: + RMWOp = llvm::AtomicRMWInst::Xor; + break; + case BO_LT: + RMWOp = X.getType()->hasSignedIntegerRepresentation() + ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Min + : llvm::AtomicRMWInst::Max) + : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMin + : llvm::AtomicRMWInst::UMax); + break; + case BO_GT: + RMWOp = X.getType()->hasSignedIntegerRepresentation() + ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Max + : llvm::AtomicRMWInst::Min) + : (IsXLHSInRHSPart ? 
llvm::AtomicRMWInst::UMax
+                                   : llvm::AtomicRMWInst::UMin);
+    break;
+  case BO_Assign:
+    RMWOp = llvm::AtomicRMWInst::Xchg;
+    break;
+  case BO_Mul:
+  case BO_Div:
+  case BO_Rem:
+  case BO_Shl:
+  case BO_Shr:
+  case BO_LAnd:
+  case BO_LOr:
+    return std::make_pair(false, RValue::get(nullptr));
+  case BO_PtrMemD:
+  case BO_PtrMemI:
+  case BO_LE:
+  case BO_GE:
+  case BO_EQ:
+  case BO_NE:
+  case BO_AddAssign:
+  case BO_SubAssign:
+  case BO_AndAssign:
+  case BO_OrAssign:
+  case BO_XorAssign:
+  case BO_MulAssign:
+  case BO_DivAssign:
+  case BO_RemAssign:
+  case BO_ShlAssign:
+  case BO_ShrAssign:
+  case BO_Comma:
+    llvm_unreachable("Unsupported atomic update operation");
+  }
+  auto *UpdateVal = Update.getScalarVal();
+  if (auto *IC = dyn_cast<llvm::ConstantInt>(UpdateVal)) {
+    UpdateVal = CGF.Builder.CreateIntCast(
+        IC, X.getAddress()->getType()->getPointerElementType(),
+        X.getType()->hasSignedIntegerRepresentation());
+  }
+  auto *Res = CGF.Builder.CreateAtomicRMW(RMWOp, X.getAddress(), UpdateVal, AO);
+  return std::make_pair(true, RValue::get(Res));
+}
+
+std::pair<bool, RValue> CodeGenFunction::EmitOMPAtomicSimpleUpdateExpr(
+    LValue X, RValue E, BinaryOperatorKind BO, bool IsXLHSInRHSPart,
+    llvm::AtomicOrdering AO, SourceLocation Loc,
+    const llvm::function_ref<RValue(RValue)> &CommonGen) {
+  // Update expressions are allowed to have the following forms:
+  // x binop= expr; -> xrval binop expr;
+  // x++, ++x -> xrval + 1;
+  // x--, --x -> xrval - 1;
+  // x = x binop expr; -> xrval binop expr;
+  // x = expr binop x; -> expr binop xrval;
+  auto Res = emitOMPAtomicRMW(*this, X, E, BO, AO, IsXLHSInRHSPart);
+  if (!Res.first) {
+    if (X.isGlobalReg()) {
+      // Emit an update expression: 'xrval' binop 'expr' or 'expr' binop
+      // 'xrval'.
+      EmitStoreThroughLValue(CommonGen(EmitLoadOfLValue(X, Loc)), X);
+    } else {
+      // Perform compare-and-swap procedure.
+      EmitAtomicUpdate(X, AO, CommonGen, X.getType().isVolatileQualified());
+    }
+  }
+  return Res;
+}
+
+static void EmitOMPAtomicUpdateExpr(CodeGenFunction &CGF, bool IsSeqCst,
+                                    const Expr *X, const Expr *E,
+                                    const Expr *UE, bool IsXLHSInRHSPart,
+                                    SourceLocation Loc) {
+  assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) &&
+         "Update expr in 'atomic update' must be a binary operator.");
+  auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts());
+  // Update expressions are allowed to have the following forms:
+  // x binop= expr; -> xrval binop expr;
+  // x++, ++x -> xrval + 1;
+  // x--, --x -> xrval - 1;
+  // x = x binop expr; -> xrval binop expr;
+  // x = expr binop x; -> expr binop xrval;
+  assert(X->isLValue() && "X of 'omp atomic update' is not lvalue");
+  LValue XLValue = CGF.EmitLValue(X);
+  RValue ExprRValue = CGF.EmitAnyExpr(E);
+  auto AO = IsSeqCst ? llvm::SequentiallyConsistent : llvm::Monotonic;
+  auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts());
+  auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts());
+  auto *XRValExpr = IsXLHSInRHSPart ? LHS : RHS;
+  auto *ERValExpr = IsXLHSInRHSPart ? RHS : LHS;
+  auto Gen =
+      [&CGF, UE, ExprRValue, XRValExpr, ERValExpr](RValue XRValue) -> RValue {
+        CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
+        CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue);
+        return CGF.EmitAnyExpr(UE);
+      };
+  (void)CGF.EmitOMPAtomicSimpleUpdateExpr(
+      XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen);
+  // OpenMP, 2.12.6, atomic Construct
+  // Any atomic construct with a seq_cst clause forces the atomically
+  // performed operation to include an implicit flush operation without a
+  // list.
+  if (IsSeqCst)
+    CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc);
+}
+
+static RValue convertToType(CodeGenFunction &CGF, RValue Value,
+                            QualType SourceType, QualType ResType) {
+  switch (CGF.getEvaluationKind(ResType)) {
+  case TEK_Scalar:
+    return RValue::get(convertToScalarValue(CGF, Value, SourceType, ResType));
+  case TEK_Complex: {
+    auto Res = convertToComplexValue(CGF, Value, SourceType, ResType);
+    return RValue::getComplex(Res.first, Res.second);
+  }
+  case TEK_Aggregate:
+    break;
+  }
+  llvm_unreachable("Must be a scalar or complex.");
+}
+
+static void EmitOMPAtomicCaptureExpr(CodeGenFunction &CGF, bool IsSeqCst,
+                                     bool IsPostfixUpdate, const Expr *V,
+                                     const Expr *X, const Expr *E,
+                                     const Expr *UE, bool IsXLHSInRHSPart,
+                                     SourceLocation Loc) {
+  assert(X->isLValue() && "X of 'omp atomic capture' is not lvalue");
+  assert(V->isLValue() && "V of 'omp atomic capture' is not lvalue");
+  RValue NewVVal;
+  LValue VLValue = CGF.EmitLValue(V);
+  LValue XLValue = CGF.EmitLValue(X);
+  RValue ExprRValue = CGF.EmitAnyExpr(E);
+  auto AO = IsSeqCst ? llvm::SequentiallyConsistent : llvm::Monotonic;
+  QualType NewVValType;
+  if (UE) {
+    // 'x' is updated with some additional value.
+    assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) &&
+           "Update expr in 'atomic capture' must be a binary operator.");
+    auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts());
+    // Update expressions are allowed to have the following forms:
+    // x binop= expr; -> xrval binop expr;
+    // x++, ++x -> xrval + 1;
+    // x--, --x -> xrval - 1;
+    // x = x binop expr; -> xrval binop expr;
+    // x = expr binop x; -> expr binop xrval;
+    auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts());
+    auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts());
+    auto *XRValExpr = IsXLHSInRHSPart ? LHS : RHS;
+    NewVValType = XRValExpr->getType();
+    auto *ERValExpr = IsXLHSInRHSPart ? RHS : LHS;
+    auto &&Gen = [&CGF, &NewVVal, UE, ExprRValue, XRValExpr, ERValExpr,
+                  IsSeqCst, IsPostfixUpdate](RValue XRValue) -> RValue {
+      CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
+      CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue);
+      RValue Res = CGF.EmitAnyExpr(UE);
+      NewVVal = IsPostfixUpdate ? XRValue : Res;
+      return Res;
+    };
+    auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr(
+        XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen);
+    if (Res.first) {
+      // 'atomicrmw' instruction was generated.
+      if (IsPostfixUpdate) {
+        // Use old value from 'atomicrmw'.
+        NewVVal = Res.second;
+      } else {
+        // 'atomicrmw' does not provide new value, so evaluate it using old
+        // value of 'x'.
+        CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
+        CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, Res.second);
+        NewVVal = CGF.EmitAnyExpr(UE);
+      }
+    }
+  } else {
+    // 'x' is simply rewritten with some 'expr'.
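    // (Illustration: the capture form '{ v = x; x = expr; }' takes this path;
    // the new value is just 'expr', so an atomic exchange suffices when the
    // target supports it.)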
+ NewVValType = X->getType().getNonReferenceType(); + ExprRValue = convertToType(CGF, ExprRValue, E->getType(), + X->getType().getNonReferenceType()); + auto &&Gen = [&CGF, &NewVVal, ExprRValue](RValue XRValue) -> RValue { + NewVVal = XRValue; + return ExprRValue; + }; + // Try to perform atomicrmw xchg, otherwise simple exchange. + auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr( + XLValue, ExprRValue, /*BO=*/BO_Assign, /*IsXLHSInRHSPart=*/false, AO, + Loc, Gen); + if (Res.first) { + // 'atomicrmw' instruction was generated. + NewVVal = IsPostfixUpdate ? Res.second : ExprRValue; + } + } + // Emit post-update store to 'v' of old/new 'x' value. + emitSimpleStore(CGF, VLValue, NewVVal, NewVValType); + // OpenMP, 2.12.6, atomic Construct + // Any atomic construct with a seq_cst clause forces the atomically + // performed operation to include an implicit flush operation without a + // list. + if (IsSeqCst) + CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc); +} + +static void EmitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind, + bool IsSeqCst, bool IsPostfixUpdate, + const Expr *X, const Expr *V, const Expr *E, + const Expr *UE, bool IsXLHSInRHSPart, + SourceLocation Loc) { + switch (Kind) { + case OMPC_read: + EmitOMPAtomicReadExpr(CGF, IsSeqCst, X, V, Loc); + break; + case OMPC_write: + EmitOMPAtomicWriteExpr(CGF, IsSeqCst, X, E, Loc); + break; + case OMPC_unknown: + case OMPC_update: + EmitOMPAtomicUpdateExpr(CGF, IsSeqCst, X, E, UE, IsXLHSInRHSPart, Loc); + break; + case OMPC_capture: + EmitOMPAtomicCaptureExpr(CGF, IsSeqCst, IsPostfixUpdate, V, X, E, UE, + IsXLHSInRHSPart, Loc); + break; + case OMPC_if: + case OMPC_final: + case OMPC_num_threads: + case OMPC_private: + case OMPC_firstprivate: + case OMPC_lastprivate: + case OMPC_reduction: + case OMPC_safelen: + case OMPC_collapse: + case OMPC_default: + case OMPC_seq_cst: + case OMPC_shared: + case OMPC_linear: + case OMPC_aligned: + case OMPC_copyin: + case OMPC_copyprivate: + case OMPC_flush: + case OMPC_proc_bind: + case OMPC_schedule: + case OMPC_ordered: + case OMPC_nowait: + case OMPC_untied: + case OMPC_threadprivate: + case OMPC_mergeable: + llvm_unreachable("Clause is not allowed in 'omp atomic'."); + } +} + +void CodeGenFunction::EmitOMPAtomicDirective(const OMPAtomicDirective &S) { + bool IsSeqCst = S.getSingleClause(/*K=*/OMPC_seq_cst); + OpenMPClauseKind Kind = OMPC_unknown; + for (auto *C : S.clauses()) { + // Find first clause (skip seq_cst clause, if it is first). + if (C->getClauseKind() != OMPC_seq_cst) { + Kind = C->getClauseKind(); + break; + } + } + + const auto *CS = + S.getAssociatedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true); + if (const auto *EWC = dyn_cast<ExprWithCleanups>(CS)) { + enterFullExpression(EWC); + } + // Processing for statements under 'atomic capture'. 
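  // (Illustration: a capture statement such as '{ v = x; x binop= expr; }'
  // arrives as a CompoundStmt whose subexpressions may each carry cleanups,
  // hence the per-statement enterFullExpression() calls below.)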
+ if (const auto *Compound = dyn_cast<CompoundStmt>(CS)) { + for (const auto *C : Compound->body()) { + if (const auto *EWC = dyn_cast<ExprWithCleanups>(C)) { + enterFullExpression(EWC); + } + } + } + + LexicalScope Scope(*this, S.getSourceRange()); + auto &&CodeGen = [&S, Kind, IsSeqCst](CodeGenFunction &CGF) { + EmitOMPAtomicExpr(CGF, Kind, IsSeqCst, S.isPostfixUpdate(), S.getX(), + S.getV(), S.getExpr(), S.getUpdateExpr(), + S.isXLHSInRHSPart(), S.getLocStart()); + }; + CGM.getOpenMPRuntime().emitInlinedDirective(*this, CodeGen); } void CodeGenFunction::EmitOMPTargetDirective(const OMPTargetDirective &) { @@ -702,4 +2015,3 @@ void CodeGenFunction::EmitOMPTargetDirective(const OMPTargetDirective &) { void CodeGenFunction::EmitOMPTeamsDirective(const OMPTeamsDirective &) { llvm_unreachable("CodeGen for 'omp teams' is not supported yet."); } - diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGVTT.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGVTT.cpp index bd280ea..e3df5a4 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CGVTT.cpp +++ b/contrib/llvm/tools/clang/lib/CodeGen/CGVTT.cpp @@ -18,7 +18,7 @@ using namespace clang; using namespace CodeGen; -static llvm::Constant * +static llvm::GlobalVariable * GetAddrOfVTTVTable(CodeGenVTables &CGVT, CodeGenModule &CGM, const CXXRecordDecl *MostDerivedClass, const VTTVTable &VTable, @@ -47,8 +47,8 @@ CodeGenVTables::EmitVTTDefinition(llvm::GlobalVariable *VTT, llvm::Type *Int8PtrTy = CGM.Int8PtrTy, *Int64Ty = CGM.Int64Ty; llvm::ArrayType *ArrayType = llvm::ArrayType::get(Int8PtrTy, Builder.getVTTComponents().size()); - - SmallVector<llvm::Constant *, 8> VTables; + + SmallVector<llvm::GlobalVariable *, 8> VTables; SmallVector<VTableAddressPointsMapTy, 8> VTableAddressPoints; for (const VTTVTable *i = Builder.getVTTVTables().begin(), *e = Builder.getVTTVTables().end(); i != e; ++i) { @@ -61,7 +61,7 @@ CodeGenVTables::EmitVTTDefinition(llvm::GlobalVariable *VTT, for (const VTTComponent *i = Builder.getVTTComponents().begin(), *e = Builder.getVTTComponents().end(); i != e; ++i) { const VTTVTable &VTTVT = Builder.getVTTVTables()[i->VTableIndex]; - llvm::Constant *VTable = VTables[i->VTableIndex]; + llvm::GlobalVariable *VTable = VTables[i->VTableIndex]; uint64_t AddressPoint; if (VTTVT.getBase() == RD) { // Just get the address point for the regular vtable. @@ -79,8 +79,8 @@ CodeGenVTables::EmitVTTDefinition(llvm::GlobalVariable *VTT, llvm::ConstantInt::get(Int64Ty, AddressPoint) }; - llvm::Constant *Init = - llvm::ConstantExpr::getInBoundsGetElementPtr(VTable, Idxs); + llvm::Constant *Init = llvm::ConstantExpr::getInBoundsGetElementPtr( + VTable->getValueType(), VTable, Idxs); Init = llvm::ConstantExpr::getBitCast(Init, Int8PtrTy); @@ -94,6 +94,9 @@ CodeGenVTables::EmitVTTDefinition(llvm::GlobalVariable *VTT, // Set the correct linkage. VTT->setLinkage(Linkage); + if (CGM.supportsCOMDAT() && VTT->isWeakForLinker()) + VTT->setComdat(CGM.getModule().getOrInsertComdat(VTT->getName())); + // Set the right visibility. CGM.setGlobalVisibility(VTT, RD); } @@ -174,4 +177,3 @@ CodeGenVTables::getSecondaryVirtualPointerIndex(const CXXRecordDecl *RD, return I->second; } - diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.cpp index acb2a56..57370a6 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.cpp +++ b/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.cpp @@ -377,7 +377,10 @@ void CodeGenFunction::GenerateThunk(llvm::Function *Fn, // Set the right linkage. 
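  // (A note on the lines added below: on targets with COMDAT support, a
  // weak-for-linker thunk is placed in a COMDAT group keyed by its own name,
  // letting the linker fold duplicate copies from other translation units.)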
CGM.setFunctionLinkage(GD, Fn);
-
+
+  if (CGM.supportsCOMDAT() && Fn->isWeakForLinker())
+    Fn->setComdat(CGM.getModule().getOrInsertComdat(Fn->getName()));
+
   // Set the right visibility.
   const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
   setThunkVisibility(CGM, MD, Thunk, Fn);
@@ -666,6 +669,8 @@ CodeGenVTables::GenerateConstructionVTable(const CXXRecordDecl *RD,
                                      VTLayout->getNumVTableThunks(), RTTI);
   VTable->setInitializer(Init);

+  CGM.EmitVTableBitSetEntries(VTable, *VTLayout.get());
+
   return VTable;
 }

@@ -738,7 +743,7 @@ CodeGenModule::getVTableLinkage(const CXXRecordDecl *RD) {
       return DiscardableODRLinkage;

     case TSK_ExplicitInstantiationDeclaration:
-      llvm_unreachable("Should not have been asked to emit this");
+      return llvm::GlobalVariable::ExternalLinkage;

     case TSK_ExplicitInstantiationDefinition:
       return NonDiscardableODRLinkage;
@@ -747,19 +752,13 @@ CodeGenModule::getVTableLinkage(const CXXRecordDecl *RD) {
   llvm_unreachable("Invalid TemplateSpecializationKind!");
 }

-/// This is a callback from Sema to tell us that it believes that a
-/// particular v-table is required to be emitted in this translation
-/// unit.
+/// This is a callback from Sema to tell us that a particular v-table is
+/// required to be emitted in this translation unit.
 ///
-/// The reason we don't simply trust this callback is because Sema
-/// will happily report that something is used even when it's used
-/// only in code that we don't actually have to emit.
-///
-/// \param isRequired - if true, the v-table is mandatory, e.g.
-/// because the translation unit defines the key function
-void CodeGenModule::EmitVTable(CXXRecordDecl *theClass, bool isRequired) {
-  if (!isRequired) return;
-
+/// This is only called for vtables that _must_ be emitted (mainly due to key
+/// functions). For weak vtables, CodeGen tracks when they are needed and
+/// emits them as-needed.
+void CodeGenModule::EmitVTable(CXXRecordDecl *theClass) {
   VTables.GenerateClassData(theClass);
 }

@@ -840,3 +839,68 @@ void CodeGenModule::EmitDeferredVTables() {
          "deferred extra v-tables during v-table emission?");
   DeferredVTables.clear();
 }
+
+void CodeGenModule::EmitVTableBitSetEntries(llvm::GlobalVariable *VTable,
+                                            const VTableLayout &VTLayout) {
+  if (!LangOpts.Sanitize.has(SanitizerKind::CFIVCall) &&
+      !LangOpts.Sanitize.has(SanitizerKind::CFINVCall) &&
+      !LangOpts.Sanitize.has(SanitizerKind::CFIDerivedCast) &&
+      !LangOpts.Sanitize.has(SanitizerKind::CFIUnrelatedCast))
+    return;
+
+  llvm::Metadata *VTableMD = llvm::ConstantAsMetadata::get(VTable);
+
+  std::vector<llvm::MDTuple *> BitsetEntries;
+  // Create a bit set entry for each address point.
+  for (auto &&AP : VTLayout.getAddressPoints()) {
+    // FIXME: Add blacklisting scheme.
+    if (AP.first.getBase()->isInStdNamespace())
+      continue;
+
+    std::string OutName;
+    llvm::raw_string_ostream Out(OutName);
+    getCXXABI().getMangleContext().mangleCXXVTableBitSet(AP.first.getBase(),
+                                                         Out);
+
+    CharUnits PointerWidth =
+        Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));
+    uint64_t AddrPointOffset = AP.second * PointerWidth.getQuantity();
+
+    llvm::Metadata *BitsetOps[] = {
+        llvm::MDString::get(getLLVMContext(), Out.str()),
+        VTableMD,
+        llvm::ConstantAsMetadata::get(
+            llvm::ConstantInt::get(Int64Ty, AddrPointOffset))};
+    llvm::MDTuple *BitsetEntry =
+        llvm::MDTuple::get(getLLVMContext(), BitsetOps);
+    BitsetEntries.push_back(BitsetEntry);
+  }
+
+  // Sort the bit set entries for determinism.
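  // (The comparator below keys on the mangled bitset name first and on the
  // address-point offset second, giving the emitted 'llvm.bitsets' metadata
  // a stable order across runs.)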
+ std::sort(BitsetEntries.begin(), BitsetEntries.end(), [](llvm::MDTuple *T1, + llvm::MDTuple *T2) { + if (T1 == T2) + return false; + + StringRef S1 = cast<llvm::MDString>(T1->getOperand(0))->getString(); + StringRef S2 = cast<llvm::MDString>(T2->getOperand(0))->getString(); + if (S1 < S2) + return true; + if (S1 != S2) + return false; + + uint64_t Offset1 = cast<llvm::ConstantInt>( + cast<llvm::ConstantAsMetadata>(T1->getOperand(2)) + ->getValue())->getZExtValue(); + uint64_t Offset2 = cast<llvm::ConstantInt>( + cast<llvm::ConstantAsMetadata>(T2->getOperand(2)) + ->getValue())->getZExtValue(); + assert(Offset1 != Offset2); + return Offset1 < Offset2; + }); + + llvm::NamedMDNode *BitsetsMD = + getModule().getOrInsertNamedMetadata("llvm.bitsets"); + for (auto BitsetEntry : BitsetEntries) + BitsetsMD->addOperand(BitsetEntry); +} diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGValue.h b/contrib/llvm/tools/clang/lib/CodeGen/CGValue.h index 82cd949..9205591 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CGValue.h +++ b/contrib/llvm/tools/clang/lib/CodeGen/CGValue.h @@ -19,6 +19,7 @@ #include "clang/AST/CharUnits.h" #include "clang/AST/Type.h" #include "llvm/IR/Value.h" +#include "llvm/IR/Type.h" namespace llvm { class Constant; @@ -299,6 +300,7 @@ public: LValue R; R.LVType = Simple; + assert(address->getType()->isPointerTy()); R.V = address; R.Initialize(type, qs, alignment, TBAAInfo); return R; diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenAction.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenAction.cpp index a6f6fde..7e82fcc 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenAction.cpp +++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenAction.cpp @@ -46,7 +46,7 @@ namespace clang { const CodeGenOptions &CodeGenOpts; const TargetOptions &TargetOpts; const LangOptions &LangOpts; - raw_ostream *AsmOutStream; + raw_pwrite_stream *AsmOutStream; ASTContext *Context; Timer LLVMIRGeneration; @@ -56,18 +56,17 @@ namespace clang { std::unique_ptr<llvm::Module> TheModule, LinkModule; public: - BackendConsumer(BackendAction action, DiagnosticsEngine &_Diags, - const CodeGenOptions &compopts, - const TargetOptions &targetopts, - const LangOptions &langopts, bool TimePasses, - const std::string &infile, llvm::Module *LinkModule, - raw_ostream *OS, LLVMContext &C, + BackendConsumer(BackendAction Action, DiagnosticsEngine &Diags, + const CodeGenOptions &CodeGenOpts, + const TargetOptions &TargetOpts, + const LangOptions &LangOpts, bool TimePasses, + const std::string &InFile, llvm::Module *LinkModule, + raw_pwrite_stream *OS, LLVMContext &C, CoverageSourceInfo *CoverageInfo = nullptr) - : Diags(_Diags), Action(action), CodeGenOpts(compopts), - TargetOpts(targetopts), LangOpts(langopts), AsmOutStream(OS), + : Diags(Diags), Action(Action), CodeGenOpts(CodeGenOpts), + TargetOpts(TargetOpts), LangOpts(LangOpts), AsmOutStream(OS), Context(nullptr), LLVMIRGeneration("LLVM IR Generation Time"), - Gen(CreateLLVMCodeGen(Diags, infile, compopts, - targetopts, C, CoverageInfo)), + Gen(CreateLLVMCodeGen(Diags, InFile, CodeGenOpts, C, CoverageInfo)), LinkModule(LinkModule) { llvm::TimePassesIsEnabled = TimePasses; } @@ -80,6 +79,11 @@ namespace clang { } void Initialize(ASTContext &Ctx) override { + if (Context) { + assert(Context == &Ctx); + return; + } + Context = &Ctx; if (llvm::TimePassesIsEnabled) @@ -196,8 +200,8 @@ namespace clang { Gen->CompleteTentativeDefinition(D); } - void HandleVTable(CXXRecordDecl *RD, bool DefinitionRequired) override { - Gen->HandleVTable(RD, 
DefinitionRequired); + void HandleVTable(CXXRecordDecl *RD) override { + Gen->HandleVTable(RD); } void HandleLinkerOptionPragma(llvm::StringRef Opts) override { @@ -430,13 +434,16 @@ void BackendConsumer::EmitOptimizationMessage( FileManager &FileMgr = SourceMgr.getFileManager(); StringRef Filename; unsigned Line, Column; - D.getLocation(&Filename, &Line, &Column); SourceLocation DILoc; - const FileEntry *FE = FileMgr.getFile(Filename); - if (FE && Line > 0) { - // If -gcolumn-info was not used, Column will be 0. This upsets the - // source manager, so pass 1 if Column is not set. - DILoc = SourceMgr.translateFileLineCol(FE, Line, Column ? Column : 1); + + if (D.isLocationAvailable()) { + D.getLocation(&Filename, &Line, &Column); + const FileEntry *FE = FileMgr.getFile(Filename); + if (FE && Line > 0) { + // If -gcolumn-info was not used, Column will be 0. This upsets the + // source manager, so pass 1 if Column is not set. + DILoc = SourceMgr.translateFileLineCol(FE, Line, Column ? Column : 1); + } } // If a location isn't available, try to approximate it using the associated @@ -451,7 +458,7 @@ void BackendConsumer::EmitOptimizationMessage( << AddFlagValue(D.getPassName() ? D.getPassName() : "") << D.getMsg().str(); - if (DILoc.isInvalid()) + if (DILoc.isInvalid() && D.isLocationAvailable()) // If we were not able to translate the file:line:col information // back to a SourceLocation, at least emit a note stating that // we could not translate this location. This can happen in the @@ -602,9 +609,8 @@ llvm::LLVMContext *CodeGenAction::takeLLVMContext() { return VMContext; } -static raw_ostream *GetOutputStream(CompilerInstance &CI, - StringRef InFile, - BackendAction Action) { +static raw_pwrite_stream * +GetOutputStream(CompilerInstance &CI, StringRef InFile, BackendAction Action) { switch (Action) { case Backend_EmitAssembly: return CI.createDefaultOutputFile(false, InFile, "s"); @@ -626,7 +632,7 @@ static raw_ostream *GetOutputStream(CompilerInstance &CI, std::unique_ptr<ASTConsumer> CodeGenAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) { BackendAction BA = static_cast<BackendAction>(Act); - std::unique_ptr<raw_ostream> OS(GetOutputStream(CI, InFile, BA)); + raw_pwrite_stream *OS = GetOutputStream(CI, InFile, BA); if (BA != Backend_EmitNothing && !OS) return nullptr; @@ -663,17 +669,23 @@ CodeGenAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) { std::unique_ptr<BackendConsumer> Result(new BackendConsumer( BA, CI.getDiagnostics(), CI.getCodeGenOpts(), CI.getTargetOpts(), CI.getLangOpts(), CI.getFrontendOpts().ShowTimers, InFile, - LinkModuleToUse, OS.release(), *VMContext, CoverageInfo)); + LinkModuleToUse, OS, *VMContext, CoverageInfo)); BEConsumer = Result.get(); return std::move(Result); } +static void BitcodeInlineAsmDiagHandler(const llvm::SMDiagnostic &SM, + void *Context, + unsigned LocCookie) { + SM.print(nullptr, llvm::errs()); +} + void CodeGenAction::ExecuteAction() { // If this is an IR file, we have to treat it specially. 
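  // (e.g. when the input is a .ll or .bc file there is no AST to walk; the
  // module is parsed directly and handed straight to the backend.)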
if (getCurrentFileKind() == IK_LLVM_IR) { BackendAction BA = static_cast<BackendAction>(Act); CompilerInstance &CI = getCompilerInstance(); - raw_ostream *OS = GetOutputStream(CI, getCurrentFile(), BA); + raw_pwrite_stream *OS = GetOutputStream(CI, getCurrentFile(), BA); if (BA != Backend_EmitNothing && !OS) return; @@ -710,14 +722,14 @@ void CodeGenAction::ExecuteAction() { } const TargetOptions &TargetOpts = CI.getTargetOpts(); if (TheModule->getTargetTriple() != TargetOpts.Triple) { - unsigned DiagID = CI.getDiagnostics().getCustomDiagID( - DiagnosticsEngine::Warning, - "overriding the module target triple with %0"); - - CI.getDiagnostics().Report(SourceLocation(), DiagID) << TargetOpts.Triple; + CI.getDiagnostics().Report(SourceLocation(), + diag::warn_fe_override_module) + << TargetOpts.Triple; TheModule->setTargetTriple(TargetOpts.Triple); } + LLVMContext &Ctx = TheModule->getContext(); + Ctx.setInlineAsmDiagnosticHandler(BitcodeInlineAsmDiagHandler); EmitBackendOutput(CI.getDiagnostics(), CI.getCodeGenOpts(), TargetOpts, CI.getLangOpts(), CI.getTarget().getTargetDescription(), TheModule.get(), BA, OS); diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.cpp index 826171a..01da750 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.cpp +++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.cpp @@ -12,6 +12,7 @@ //===----------------------------------------------------------------------===// #include "CodeGenFunction.h" +#include "CGCleanup.h" #include "CGCUDARuntime.h" #include "CGCXXABI.h" #include "CGDebugInfo.h" @@ -40,10 +41,11 @@ CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext) CurFn(nullptr), CapturedStmtInfo(nullptr), SanOpts(CGM.getLangOpts().Sanitize), IsSanitizerScope(false), CurFuncIsThunk(false), AutoreleaseResult(false), SawAsmBlock(false), - BlockInfo(nullptr), BlockPointer(nullptr), + IsOutlinedSEHHelper(false), BlockInfo(nullptr), BlockPointer(nullptr), LambdaThisCaptureField(nullptr), NormalCleanupDest(nullptr), NextCleanupDestIndex(1), FirstBlockInfo(nullptr), EHResumeBlock(nullptr), ExceptionSlot(nullptr), EHSelectorSlot(nullptr), + AbnormalTerminationSlot(nullptr), SEHPointersDecl(nullptr), DebugInfo(CGM.getModuleDebugInfo()), DisableDebugInfo(false), DidCallStackSave(false), IndirectBranch(nullptr), PGO(cgm), SwitchInsn(nullptr), SwitchWeights(nullptr), CaseRangeBlock(nullptr), @@ -69,6 +71,9 @@ CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext) if (CGM.getCodeGenOpts().NoSignedZeros) { FMF.setNoSignedZeros(); } + if (CGM.getCodeGenOpts().ReciprocalMath) { + FMF.setAllowReciprocal(); + } Builder.SetFastMathFlags(FMF); } @@ -82,7 +87,7 @@ CodeGenFunction::~CodeGenFunction() { destroyBlockInfos(FirstBlockInfo); if (getLangOpts().OpenMP) { - CGM.getOpenMPRuntime().FunctionFinished(*this); + CGM.getOpenMPRuntime().functionFinished(*this); } } @@ -239,17 +244,18 @@ void CodeGenFunction::FinishFunction(SourceLocation EndLoc) { // parameters. Do this in whatever block we're currently in; it's // important to do this before we enter the return block or return // edges will be *really* confused. 
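  // (The rewrite below lets the 'ret' keep its natural debug location when
  // the only pending cleanups are lifetime.end markers; real cleanups still
  // move the location to EndLoc before popping.)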
- bool EmitRetDbgLoc = true; - if (EHStack.stable_begin() != PrologueCleanupDepth) { - PopCleanupBlocks(PrologueCleanupDepth); - + bool HasCleanups = EHStack.stable_begin() != PrologueCleanupDepth; + bool HasOnlyLifetimeMarkers = + HasCleanups && EHStack.containsOnlyLifetimeMarkers(PrologueCleanupDepth); + bool EmitRetDbgLoc = !HasCleanups || HasOnlyLifetimeMarkers; + if (HasCleanups) { // Make sure the line table doesn't jump back into the body for // the ret after it's been at EndLoc. - EmitRetDbgLoc = false; - if (CGDebugInfo *DI = getDebugInfo()) if (OnlySimpleReturnStmts) DI->EmitLocation(Builder, EndLoc); + + PopCleanupBlocks(PrologueCleanupDepth); } // Emit function epilog (to return). @@ -278,6 +284,20 @@ void CodeGenFunction::FinishFunction(SourceLocation EndLoc) { Builder.ClearInsertionPoint(); } + // If some of our locals escaped, insert a call to llvm.frameescape in the + // entry block. + if (!EscapedLocals.empty()) { + // Invert the map from local to index into a simple vector. There should be + // no holes. + SmallVector<llvm::Value *, 4> EscapeArgs; + EscapeArgs.resize(EscapedLocals.size()); + for (auto &Pair : EscapedLocals) + EscapeArgs[Pair.second] = Pair.first; + llvm::Function *FrameEscapeFn = llvm::Intrinsic::getDeclaration( + &CGM.getModule(), llvm::Intrinsic::frameescape); + CGBuilderTy(AllocaInsertPt).CreateCall(FrameEscapeFn, EscapeArgs); + } + // Remove the AllocaInsertPt instruction, which is just a convenience for us. llvm::Instruction *Ptr = AllocaInsertPt; AllocaInsertPt = nullptr; @@ -588,6 +608,20 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, if (CGM.isInSanitizerBlacklist(Fn, Loc)) SanOpts.clear(); + if (D) { + // Apply the no_sanitize* attributes to SanOpts. + for (auto Attr : D->specific_attrs<NoSanitizeAttr>()) + SanOpts.Mask &= ~Attr->getMask(); + } + + // Apply sanitizer attributes to the function. + if (SanOpts.has(SanitizerKind::Address)) + Fn->addFnAttr(llvm::Attribute::SanitizeAddress); + if (SanOpts.has(SanitizerKind::Thread)) + Fn->addFnAttr(llvm::Attribute::SanitizeThread); + if (SanOpts.has(SanitizerKind::Memory)) + Fn->addFnAttr(llvm::Attribute::SanitizeMemory); + // Pass inline keyword to optimizer if it appears explicitly on any // declaration. Also, in the case of -fno-inline attach NoInline // attribute to all function that are not marked AlwaysInline. @@ -679,7 +713,7 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, unsigned Idx = CurFnInfo->getReturnInfo().getInAllocaFieldIndex(); llvm::Function::arg_iterator EI = CurFn->arg_end(); --EI; - llvm::Value *Addr = Builder.CreateStructGEP(EI, Idx); + llvm::Value *Addr = Builder.CreateStructGEP(nullptr, EI, Idx); ReturnValue = Builder.CreateLoad(Addr, "agg.result"); } else { ReturnValue = CreateIRTemp(RetTy, "retval"); @@ -753,8 +787,7 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, void CodeGenFunction::EmitFunctionBody(FunctionArgList &Args, const Stmt *Body) { - RegionCounter Cnt = getPGORegionCounter(Body); - Cnt.beginRegion(Builder); + incrementProfileCounter(Body); if (const CompoundStmt *S = dyn_cast<CompoundStmt>(Body)) EmitCompoundStmtWithoutScope(*S); else @@ -766,7 +799,7 @@ void CodeGenFunction::EmitFunctionBody(FunctionArgList &Args, /// emit a branch around the instrumentation code. When not instrumenting, /// this just calls EmitBlock(). 
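/// (Sketch of the intent: when instrumenting, the fallthrough edge branches
/// around the counter increment so it is not counted twice, and its count is
/// folded back into the region count afterwards.)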
void CodeGenFunction::EmitBlockWithFallThrough(llvm::BasicBlock *BB, - RegionCounter &Cnt) { + const Stmt *S) { llvm::BasicBlock *SkipCountBB = nullptr; if (HaveInsertPoint() && CGM.getCodeGenOpts().ProfileInstrGenerate) { // When instrumenting for profiling, the fallthrough to certain @@ -776,7 +809,9 @@ void CodeGenFunction::EmitBlockWithFallThrough(llvm::BasicBlock *BB, EmitBranch(SkipCountBB); } EmitBlock(BB); - Cnt.beginRegion(Builder, /*AddIncomingFallThrough=*/true); + uint64_t CurrentCount = getCurrentProfileCount(); + incrementProfileCounter(S); + setCurrentProfileCount(getCurrentProfileCount() + CurrentCount); if (SkipCountBB) EmitBlock(SkipCountBB); } @@ -801,17 +836,6 @@ static void TryMarkNoThrow(llvm::Function *F) { F->setDoesNotThrow(); } -static void EmitSizedDeallocationFunction(CodeGenFunction &CGF, - const FunctionDecl *UnsizedDealloc) { - // This is a weak discardable definition of the sized deallocation function. - CGF.CurFn->setLinkage(llvm::Function::LinkOnceAnyLinkage); - - // Call the unsized deallocation function and forward the first argument - // unchanged. - llvm::Constant *Unsized = CGF.CGM.GetAddrOfFunction(UnsizedDealloc); - CGF.Builder.CreateCall(Unsized, &*CGF.CurFn->arg_begin()); -} - void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn, const CGFunctionInfo &FnInfo) { const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl()); @@ -832,7 +856,7 @@ void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn, ResTy = CGM.getContext().VoidPtrTy; CGM.getCXXABI().buildThisParam(*this, Args); } - + Args.append(FD->param_begin(), FD->param_end()); if (MD && (isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD))) @@ -866,9 +890,9 @@ void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn, else if (isa<CXXConstructorDecl>(FD)) EmitConstructorBody(Args); else if (getLangOpts().CUDA && - !CGM.getCodeGenOpts().CUDAIsDevice && + !getLangOpts().CUDAIsDevice && FD->hasAttr<CUDAGlobalAttr>()) - CGM.getCUDARuntime().EmitDeviceStubBody(*this, Args); + CGM.getCUDARuntime().emitDeviceStub(*this, Args); else if (isa<CXXConversionDecl>(FD) && cast<CXXConversionDecl>(FD)->isLambdaToBlockPointerConversion()) { // The lambda conversion to block pointer is special; the semantics can't be @@ -887,11 +911,6 @@ void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn, emitImplicitAssignmentOperatorBody(Args); } else if (Stmt *Body = FD->getBody()) { EmitFunctionBody(Args, Body); - } else if (FunctionDecl *UnsizedDealloc = - FD->getCorrespondingUnsizedGlobalDeallocationFunction()) { - // Global sized deallocation functions get an implicit weak definition if - // they don't have an explicit definition. - EmitSizedDeallocationFunction(*this, UnsizedDealloc); } else llvm_unreachable("no definition for emitted function"); @@ -910,7 +929,7 @@ void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn, "missing_return", EmitCheckSourceLocation(FD->getLocation()), None); } else if (CGM.getCodeGenOpts().OptimizationLevel == 0) - Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::trap)); + Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::trap), {}); Builder.CreateUnreachable(); Builder.ClearInsertionPoint(); } @@ -1028,15 +1047,13 @@ void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond, // Handle X && Y in a condition. if (CondBOp->getOpcode() == BO_LAnd) { - RegionCounter Cnt = getPGORegionCounter(CondBOp); - // If we have "1 && X", simplify the code. 
"0 && X" would have constant // folded if the case was simple enough. bool ConstantBool = false; if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) && ConstantBool) { // br(1 && X) -> br(X). - Cnt.beginRegion(Builder); + incrementProfileCounter(CondBOp); return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, TrueCount); } @@ -1055,14 +1072,19 @@ void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true"); // The counter tells us how often we evaluate RHS, and all of TrueCount // can be propagated to that branch. - uint64_t RHSCount = Cnt.getCount(); + uint64_t RHSCount = getProfileCount(CondBOp->getRHS()); ConditionalEvaluation eval(*this); - EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock, RHSCount); - EmitBlock(LHSTrue); + { + ApplyDebugLocation DL(*this, Cond); + EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock, RHSCount); + EmitBlock(LHSTrue); + } + + incrementProfileCounter(CondBOp); + setCurrentProfileCount(getProfileCount(CondBOp->getRHS())); // Any temporaries created here are conditional. - Cnt.beginRegion(Builder); eval.begin(*this); EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, TrueCount); eval.end(*this); @@ -1071,15 +1093,13 @@ void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond, } if (CondBOp->getOpcode() == BO_LOr) { - RegionCounter Cnt = getPGORegionCounter(CondBOp); - // If we have "0 || X", simplify the code. "1 || X" would have constant // folded if the case was simple enough. bool ConstantBool = false; if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) && !ConstantBool) { // br(0 || X) -> br(X). - Cnt.beginRegion(Builder); + incrementProfileCounter(CondBOp); return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, TrueCount); } @@ -1099,15 +1119,21 @@ void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond, // We have the count for entry to the RHS and for the whole expression // being true, so we can divy up True count between the short circuit and // the RHS. - uint64_t LHSCount = Cnt.getParentCount() - Cnt.getCount(); + uint64_t LHSCount = + getCurrentProfileCount() - getProfileCount(CondBOp->getRHS()); uint64_t RHSCount = TrueCount - LHSCount; ConditionalEvaluation eval(*this); - EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse, LHSCount); - EmitBlock(LHSFalse); + { + ApplyDebugLocation DL(*this, Cond); + EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse, LHSCount); + EmitBlock(LHSFalse); + } + + incrementProfileCounter(CondBOp); + setCurrentProfileCount(getProfileCount(CondBOp->getRHS())); // Any temporaries created here are conditional. - Cnt.beginRegion(Builder); eval.begin(*this); EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, RHSCount); @@ -1121,7 +1147,7 @@ void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond, // br(!x, t, f) -> br(x, f, t) if (CondUOp->getOpcode() == UO_LNot) { // Negate the count. - uint64_t FalseCount = PGO.getCurrentRegionCount() - TrueCount; + uint64_t FalseCount = getCurrentProfileCount() - TrueCount; // Negate the condition and swap the destination blocks. 
return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock, FalseCount); @@ -1133,9 +1159,9 @@ void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true"); llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false"); - RegionCounter Cnt = getPGORegionCounter(CondOp); ConditionalEvaluation cond(*this); - EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock, Cnt.getCount()); + EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock, + getProfileCount(CondOp)); // When computing PGO branch weights, we only know the overall count for // the true block. This code is essentially doing tail duplication of the @@ -1144,15 +1170,19 @@ void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond, // the conditional operator. uint64_t LHSScaledTrueCount = 0; if (TrueCount) { - double LHSRatio = Cnt.getCount() / (double) Cnt.getParentCount(); + double LHSRatio = + getProfileCount(CondOp) / (double)getCurrentProfileCount(); LHSScaledTrueCount = TrueCount * LHSRatio; } cond.begin(*this); EmitBlock(LHSBlock); - Cnt.beginRegion(Builder); - EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock, - LHSScaledTrueCount); + incrementProfileCounter(CondOp); + { + ApplyDebugLocation DL(*this, Cond); + EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock, + LHSScaledTrueCount); + } cond.end(*this); cond.begin(*this); @@ -1176,12 +1206,16 @@ void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond, // Create branch weights based on the number of times we get here and the // number of times the condition should be true. - uint64_t CurrentCount = std::max(PGO.getCurrentRegionCount(), TrueCount); - llvm::MDNode *Weights = PGO.createBranchWeights(TrueCount, - CurrentCount - TrueCount); + uint64_t CurrentCount = std::max(getCurrentProfileCount(), TrueCount); + llvm::MDNode *Weights = + createProfileWeights(TrueCount, CurrentCount - TrueCount); // Emit the code with the fully general case. - llvm::Value *CondV = EvaluateExprAsBool(Cond); + llvm::Value *CondV; + { + ApplyDebugLocation DL(*this, Cond); + CondV = EvaluateExprAsBool(Cond); + } Builder.CreateCondBr(CondV, TrueBlock, FalseBlock, Weights); } @@ -1231,7 +1265,8 @@ static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType, /*volatile*/ false); // Go to the next element. - llvm::Value *next = Builder.CreateConstInBoundsGEP1_32(cur, 1, "vla.next"); + llvm::Value *next = Builder.CreateConstInBoundsGEP1_32(Builder.getInt8Ty(), + cur, 1, "vla.next"); // Leave if that's the end of the VLA. llvm::Value *done = Builder.CreateICmpEQ(next, end, "vla-init.isdone"); diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.h b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.h index 3a990d2..650ad7b 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.h +++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.h @@ -27,6 +27,7 @@ #include "clang/AST/Type.h" #include "clang/Basic/ABI.h" #include "clang/Basic/CapturedStmt.h" +#include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/TargetInfo.h" #include "clang/Frontend/CodeGenOptions.h" #include "llvm/ADT/ArrayRef.h" @@ -96,8 +97,8 @@ enum TypeEvaluationKind { /// CodeGenFunction - This class organizes the per-function state that is used /// while generating LLVM code. 
class CodeGenFunction : public CodeGenTypeCache { - CodeGenFunction(const CodeGenFunction &) LLVM_DELETED_FUNCTION; - void operator=(const CodeGenFunction &) LLVM_DELETED_FUNCTION; + CodeGenFunction(const CodeGenFunction &) = delete; + void operator=(const CodeGenFunction &) = delete; friend class CGCXXABI; public: @@ -191,26 +192,25 @@ public: CapturedRegionKind getKind() const { return Kind; } - void setContextValue(llvm::Value *V) { ThisValue = V; } + virtual void setContextValue(llvm::Value *V) { ThisValue = V; } // \brief Retrieve the value of the context parameter. - llvm::Value *getContextValue() const { return ThisValue; } + virtual llvm::Value *getContextValue() const { return ThisValue; } /// \brief Lookup the captured field decl for a variable. - const FieldDecl *lookup(const VarDecl *VD) const { + virtual const FieldDecl *lookup(const VarDecl *VD) const { return CaptureFields.lookup(VD); } - bool isCXXThisExprCaptured() const { return CXXThisFieldDecl != nullptr; } - FieldDecl *getThisFieldDecl() const { return CXXThisFieldDecl; } + bool isCXXThisExprCaptured() const { return getThisFieldDecl() != nullptr; } + virtual FieldDecl *getThisFieldDecl() const { return CXXThisFieldDecl; } static bool classof(const CGCapturedStmtInfo *) { return true; } /// \brief Emit the captured statement body. - virtual void EmitBody(CodeGenFunction &CGF, Stmt *S) { - RegionCounter Cnt = CGF.getPGORegionCounter(S); - Cnt.beginRegion(CGF.Builder); + virtual void EmitBody(CodeGenFunction &CGF, const Stmt *S) { + CGF.incrementProfileCounter(S); CGF.EmitStmt(S); } @@ -262,6 +262,10 @@ public: /// potentially set the return value. bool SawAsmBlock; + /// True if the current function is an outlined SEH helper. This can be a + /// finally block or filter expression. + bool IsOutlinedSEHHelper; + const CodeGen::CGBlockInfo *BlockInfo; llvm::Value *BlockPointer; @@ -274,6 +278,7 @@ public: EHScopeStack EHStack; llvm::SmallVector<char, 256> LifetimeExtendedCleanupStack; + llvm::SmallVector<const JumpDest *, 2> SEHTryEpilogueStack; /// Header for data within LifetimeExtendedCleanupStack. struct LifetimeExtendedCleanupHeader { @@ -305,6 +310,12 @@ public: /// write the current selector value into this alloca. llvm::AllocaInst *EHSelectorSlot; + llvm::AllocaInst *AbnormalTerminationSlot; + + /// The implicit parameter to SEH filter functions of type + /// 'EXCEPTION_POINTERS*'. + ImplicitParamDecl *SEHPointersDecl; + /// Emits a landing pad for the current EH stack. llvm::BasicBlock *EmitLandingPad(); @@ -343,87 +354,32 @@ public: void exit(CodeGenFunction &CGF); }; - /// pushFullExprCleanup - Push a cleanup to be run at the end of the - /// current full-expression. Safe against the possibility that - /// we're currently inside a conditionally-evaluated expression. - template <class T, class A0> - void pushFullExprCleanup(CleanupKind kind, A0 a0) { - // If we're not in a conditional branch, or if none of the - // arguments requires saving, then use the unconditional cleanup. - if (!isInConditionalBranch()) - return EHStack.pushCleanup<T>(kind, a0); - - typename DominatingValue<A0>::saved_type a0_saved = saveValueInCond(a0); - - typedef EHScopeStack::ConditionalCleanup1<T, A0> CleanupType; - EHStack.pushCleanup<CleanupType>(kind, a0_saved); - initFullExprCleanup(); - } + /// Returns true inside SEH __try blocks. + bool isSEHTryScope() const { return !SEHTryEpilogueStack.empty(); } /// pushFullExprCleanup - Push a cleanup to be run at the end of the /// current full-expression. 
Safe against the possibility that /// we're currently inside a conditionally-evaluated expression. - template <class T, class A0, class A1> - void pushFullExprCleanup(CleanupKind kind, A0 a0, A1 a1) { + template <class T, class... As> + void pushFullExprCleanup(CleanupKind kind, As... A) { // If we're not in a conditional branch, or if none of the // arguments requires saving, then use the unconditional cleanup. if (!isInConditionalBranch()) - return EHStack.pushCleanup<T>(kind, a0, a1); + return EHStack.pushCleanup<T>(kind, A...); - typename DominatingValue<A0>::saved_type a0_saved = saveValueInCond(a0); - typename DominatingValue<A1>::saved_type a1_saved = saveValueInCond(a1); + // Stash values in a tuple so we can guarantee the order of saves. + typedef std::tuple<typename DominatingValue<As>::saved_type...> SavedTuple; + SavedTuple Saved{saveValueInCond(A)...}; - typedef EHScopeStack::ConditionalCleanup2<T, A0, A1> CleanupType; - EHStack.pushCleanup<CleanupType>(kind, a0_saved, a1_saved); - initFullExprCleanup(); - } - - /// pushFullExprCleanup - Push a cleanup to be run at the end of the - /// current full-expression. Safe against the possibility that - /// we're currently inside a conditionally-evaluated expression. - template <class T, class A0, class A1, class A2> - void pushFullExprCleanup(CleanupKind kind, A0 a0, A1 a1, A2 a2) { - // If we're not in a conditional branch, or if none of the - // arguments requires saving, then use the unconditional cleanup. - if (!isInConditionalBranch()) { - return EHStack.pushCleanup<T>(kind, a0, a1, a2); - } - - typename DominatingValue<A0>::saved_type a0_saved = saveValueInCond(a0); - typename DominatingValue<A1>::saved_type a1_saved = saveValueInCond(a1); - typename DominatingValue<A2>::saved_type a2_saved = saveValueInCond(a2); - - typedef EHScopeStack::ConditionalCleanup3<T, A0, A1, A2> CleanupType; - EHStack.pushCleanup<CleanupType>(kind, a0_saved, a1_saved, a2_saved); - initFullExprCleanup(); - } - - /// pushFullExprCleanup - Push a cleanup to be run at the end of the - /// current full-expression. Safe against the possibility that - /// we're currently inside a conditionally-evaluated expression. - template <class T, class A0, class A1, class A2, class A3> - void pushFullExprCleanup(CleanupKind kind, A0 a0, A1 a1, A2 a2, A3 a3) { - // If we're not in a conditional branch, or if none of the - // arguments requires saving, then use the unconditional cleanup. - if (!isInConditionalBranch()) { - return EHStack.pushCleanup<T>(kind, a0, a1, a2, a3); - } - - typename DominatingValue<A0>::saved_type a0_saved = saveValueInCond(a0); - typename DominatingValue<A1>::saved_type a1_saved = saveValueInCond(a1); - typename DominatingValue<A2>::saved_type a2_saved = saveValueInCond(a2); - typename DominatingValue<A3>::saved_type a3_saved = saveValueInCond(a3); - - typedef EHScopeStack::ConditionalCleanup4<T, A0, A1, A2, A3> CleanupType; - EHStack.pushCleanup<CleanupType>(kind, a0_saved, a1_saved, - a2_saved, a3_saved); + typedef EHScopeStack::ConditionalCleanup<T, As...> CleanupType; + EHStack.pushCleanupTuple<CleanupType>(kind, Saved); initFullExprCleanup(); } /// \brief Queue a cleanup to be pushed after finishing the current /// full-expression. - template <class T, class A0, class A1, class A2, class A3> - void pushCleanupAfterFullExpr(CleanupKind Kind, A0 a0, A1 a1, A2 a2, A3 a3) { + template <class T, class... As> + void pushCleanupAfterFullExpr(CleanupKind Kind, As... 
   /// \brief Queue a cleanup to be pushed after finishing the current
   /// full-expression.
-  template <class T, class A0, class A1, class A2, class A3>
-  void pushCleanupAfterFullExpr(CleanupKind Kind, A0 a0, A1 a1, A2 a2, A3 a3) {
+  template <class T, class... As>
+  void pushCleanupAfterFullExpr(CleanupKind Kind, As... A) {
     assert(!isInConditionalBranch() && "can't defer conditional cleanup");

     LifetimeExtendedCleanupHeader Header = { sizeof(T), Kind };
@@ -434,7 +390,7 @@ public:

     char *Buffer = &LifetimeExtendedCleanupStack[OldSize];
     new (Buffer) LifetimeExtendedCleanupHeader(Header);
-    new (Buffer + sizeof(Header)) T(a0, a1, a2, a3);
+    new (Buffer + sizeof(Header)) T(A...);
   }

   /// Set up the last cleanup that was pushed as a conditional
@@ -488,8 +444,8 @@ public:
     bool PerformCleanup;

   private:
-    RunCleanupsScope(const RunCleanupsScope &) LLVM_DELETED_FUNCTION;
-    void operator=(const RunCleanupsScope &) LLVM_DELETED_FUNCTION;
+    RunCleanupsScope(const RunCleanupsScope &) = delete;
+    void operator=(const RunCleanupsScope &) = delete;

   protected:
     CodeGenFunction& CGF;
@@ -537,8 +493,8 @@ public:
     SmallVector<const LabelDecl*, 4> Labels;
     LexicalScope *ParentScope;

-    LexicalScope(const LexicalScope &) LLVM_DELETED_FUNCTION;
-    void operator=(const LexicalScope &) LLVM_DELETED_FUNCTION;
+    LexicalScope(const LexicalScope &) = delete;
+    void operator=(const LexicalScope &) = delete;

   public:
     /// \brief Enter a new cleanup scope.
@@ -562,7 +518,10 @@ public:

       // If we should perform a cleanup, force them now. Note that
       // this ends the cleanup scope before rescoping any labels.
-      if (PerformCleanup) ForceCleanup();
+      if (PerformCleanup) {
+        ApplyDebugLocation DL(CGF, Range.getEnd());
+        ForceCleanup();
+      }
     }

     /// \brief Force the emission of cleanups now, instead of waiting
@@ -587,8 +546,8 @@ public:
     VarDeclMapTy SavedPrivates;

   private:
-    OMPPrivateScope(const OMPPrivateScope &) LLVM_DELETED_FUNCTION;
-    void operator=(const OMPPrivateScope &) LLVM_DELETED_FUNCTION;
+    OMPPrivateScope(const OMPPrivateScope &) = delete;
+    void operator=(const OMPPrivateScope &) = delete;

   public:
     /// \brief Enter a new OpenMP private scope.
@@ -637,7 +596,10 @@ public:
     }

     /// \brief Exit scope - all the mapped variables are restored.
-    ~OMPPrivateScope() { ForceCleanup(); }
+    ~OMPPrivateScope() {
+      if (PerformCleanup)
+        ForceCleanup();
+    }
   };

   /// \brief Takes the old cleanup stack size and emits the cleanup blocks
@@ -881,7 +843,8 @@ public:

   /// getByrefValueFieldNumber - Given a declaration, returns the LLVM field
   /// number that holds the value.
-  unsigned getByRefValueLLVMField(const ValueDecl *VD) const;
+  std::pair<llvm::Type *, unsigned>
+  getByRefValueLLVMField(const ValueDecl *VD) const;

   /// BuildBlockByrefAddress - Computes address location of the
   /// variable which is declared as __block.
@@ -906,6 +869,10 @@ private:
   typedef llvm::DenseMap<const Decl*, llvm::Value*> DeclMapTy;
   DeclMapTy LocalDeclMap;

+  /// Track escaped local variables with auto storage. Used during SEH
+  /// outlining to produce a call to llvm.frameescape.
+  llvm::DenseMap<llvm::AllocaInst *, int> EscapedLocals;
+
   /// LabelMap - This keeps track of the LLVM basic block for each C label.
   llvm::DenseMap<const LabelDecl*, JumpDest> LabelMap;

@@ -922,12 +889,39 @@ private:

   CodeGenPGO PGO;

+  /// Calculate branch weights appropriate for PGO data
+  llvm::MDNode *createProfileWeights(uint64_t TrueCount, uint64_t FalseCount);
+  llvm::MDNode *createProfileWeights(ArrayRef<uint64_t> Weights);
+  llvm::MDNode *createProfileWeightsForLoop(const Stmt *Cond,
+                                            uint64_t LoopCount);
+
 public:
-  /// Get a counter for instrumentation of the region associated with the given
-  /// statement.
-  RegionCounter getPGORegionCounter(const Stmt *S) {
-    return RegionCounter(PGO, S);
+  /// Increment the profiler's counter for the given statement.
+  void incrementProfileCounter(const Stmt *S) {
+    if (CGM.getCodeGenOpts().ProfileInstrGenerate)
+      PGO.emitCounterIncrement(Builder, S);
+    PGO.setCurrentStmt(S);
+  }
+
+  /// Get the profiler's count for the given statement.
+  uint64_t getProfileCount(const Stmt *S) {
+    Optional<uint64_t> Count = PGO.getStmtCount(S);
+    if (!Count.hasValue())
+      return 0;
+    return *Count;
+  }
+
+  /// Set the profiler's current count.
+  void setCurrentProfileCount(uint64_t Count) {
+    PGO.setCurrentRegionCount(Count);
+  }
+
+  /// Get the profiler's current count. This is generally the count for the
+  /// most recently incremented counter.
+  uint64_t getCurrentProfileCount() {
+    return PGO.getCurrentRegionCount();
   }
+
+private:
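A toy model of the counter discipline this new interface encodes, showing how a consumer derives branch weights from the counts. Profiler, Stmt, and emitIf here are stand-ins, not Clang APIs, and the real incrementProfileCounter additionally emits the instrumentation increment when -fprofile-instr-generate is on.

#include <cstdint>
#include <cstdio>
#include <map>

struct Stmt { const char *Name; };

struct Profiler {
  std::map<const Stmt *, uint64_t> StmtCounts; // filled from profile data
  uint64_t CurrentCount = 0;                   // count at the insertion point

  void incrementProfileCounter(const Stmt *S) { CurrentCount = StmtCounts[S]; }
  uint64_t getProfileCount(const Stmt *S) { return StmtCounts[S]; }
  void setCurrentProfileCount(uint64_t C) { CurrentCount = C; }
  uint64_t getCurrentProfileCount() const { return CurrentCount; }
};

// Weight an if: the 'then' edge gets the statement's own count, the 'else'
// edge gets whatever remains of the parent count.
void emitIf(Profiler &P, const Stmt *Then) {
  uint64_t ParentCount = P.getCurrentProfileCount();
  uint64_t ThenCount = P.getProfileCount(Then);
  std::printf("br weights: then=%llu else=%llu\n",
              (unsigned long long)ThenCount,
              (unsigned long long)(ParentCount - ThenCount));
  P.incrementProfileCounter(Then); // control enters the then-block region
}

int main() {
  Profiler P;
  Stmt Then{"then"};
  P.setCurrentProfileCount(100);
  P.StmtCounts[&Then] = 80;
  emitIf(P, &Then); // br weights: then=80 else=20
}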
   /// SwitchInsn - This is the nearest current switch instruction. It is null if
@@ -1097,6 +1091,11 @@ public:
     return getInvokeDestImpl();
   }

+  bool currentFunctionUsesSEHTry() const {
+    const auto *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl);
+    return FD && FD->usesSEHTry();
+  }
+
   const TargetInfo &getTarget() const { return Target; }
   llvm::LLVMContext &getLLVMContext() { return CGM.getLLVMContext(); }

@@ -1248,7 +1247,7 @@ public:
   void EmitDestructorBody(FunctionArgList &Args);
   void emitImplicitAssignmentOperatorBody(FunctionArgList &Args);
   void EmitFunctionBody(FunctionArgList &Args, const Stmt *Body);
-  void EmitBlockWithFallThrough(llvm::BasicBlock *BB, RegionCounter &Cnt);
+  void EmitBlockWithFallThrough(llvm::BasicBlock *BB, const Stmt *S);

   void EmitForwardingCallToLambda(const CXXMethodDecl *LambdaCallOperator,
                                   CallArgList &CallArgs);
@@ -1258,15 +1257,18 @@ public:
   void EmitLambdaStaticInvokeFunction(const CXXMethodDecl *MD);
   void EmitAsanPrologueOrEpilogue(bool Prologue);

-  /// EmitReturnBlock - Emit the unified return block, trying to avoid its
-  /// emission when possible.
+  /// \brief Emit the unified return block, trying to avoid its emission when
+  /// possible.
+  /// \return The debug location of the user written return statement if the
+  /// return block is avoided.
   llvm::DebugLoc EmitReturnBlock();

   /// FinishFunction - Complete IR generation of the current function. It is
   /// legal to call this function even if there is no current insertion point.
   void FinishFunction(SourceLocation EndLoc=SourceLocation());

-  void StartThunk(llvm::Function *Fn, GlobalDecl GD, const CGFunctionInfo &FnInfo);
+  void StartThunk(llvm::Function *Fn, GlobalDecl GD,
+                  const CGFunctionInfo &FnInfo);

   void EmitCallAndReturnForThunk(llvm::Value *Callee, const ThunkInfo *Thunk);

@@ -1309,6 +1311,19 @@ public:
   /// to by This.
   llvm::Value *GetVTablePtr(llvm::Value *This, llvm::Type *Ty);

+  /// \brief Derived is the presumed address of an object of type T after a
+  /// cast. If T is a polymorphic class type, emit a check that the virtual
+  /// table for Derived belongs to a class derived from T.
+  void EmitVTablePtrCheckForCast(QualType T, llvm::Value *Derived,
+                                 bool MayBeNull);
+
+  /// EmitVTablePtrCheckForCall - Virtual method MD is being called via VTable.
+  /// If vptr CFI is enabled, emit a check that VTable is valid.
+  void EmitVTablePtrCheckForCall(const CXXMethodDecl *MD, llvm::Value *VTable);
+
+  /// EmitVTablePtrCheck - Emit a check that VTable is a valid virtual table for
+  /// RD using llvm.bitset.test.
+  void EmitVTablePtrCheck(const CXXRecordDecl *RD, llvm::Value *VTable);

   /// CanDevirtualizeMemberFunctionCalls - Checks whether virtual calls on given
   /// expr can be devirtualized.
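For orientation, a source-level example of where the vptr checks above would fire. The sanitizer flag spelling is an assumption about the CFI mode these hooks serve, but the shape of the check follows the declarations: load the object's vptr and test it against the bitset of valid vtables with llvm.bitset.test, diverting to a trap block on failure.

struct Base { virtual ~Base() {} };
struct Derived : Base { virtual void f() {} };

Derived *downcast(Base *B) {
  // With vptr CFI enabled (e.g. a -fsanitize=cfi* mode covering derived
  // casts), EmitVTablePtrCheckForCast would emit, at this point: load B's
  // vptr, call i1 @llvm.bitset.test(vptr, !"Derived-vtables-bitset"), and
  // branch to a trap/diagnostic block when the test fails.
  return static_cast<Derived *>(B);
}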
@@ -1527,6 +1542,8 @@ public:
   void EmitAnyExprToMem(const Expr *E, llvm::Value *Location,
                         Qualifiers Quals, bool IsInitializer);

+  void EmitAnyExprToExn(const Expr *E, llvm::Value *Addr);
+
   /// EmitExprAsInit - Emits the code necessary to initialize a
   /// location in memory with the given initializer.
   void EmitExprAsInit(const Expr *init, const ValueDecl *D, LValue lvalue,
@@ -1552,6 +1569,15 @@ public:
                       true);
   }

+  void EmitAggregateCopyCtor(llvm::Value *DestPtr, llvm::Value *SrcPtr,
+                             QualType DestTy, QualType SrcTy) {
+    CharUnits DestTypeAlign = getContext().getTypeAlignInChars(DestTy);
+    CharUnits SrcTypeAlign = getContext().getTypeAlignInChars(SrcTy);
+    EmitAggregateCopy(DestPtr, SrcPtr, SrcTy, /*IsVolatile=*/false,
+                      std::min(DestTypeAlign, SrcTypeAlign),
+                      /*IsAssignment=*/false);
+  }
+
   /// EmitAggregateCopy - Emit an aggregate copy.
   ///
   /// \param isVolatile - True iff either the source or the destination is
@@ -1723,12 +1749,16 @@ public:
                               llvm::Value *This);

   void EmitNewArrayInitializer(const CXXNewExpr *E, QualType elementType,
-                               llvm::Value *NewPtr, llvm::Value *NumElements,
+                               llvm::Type *ElementTy, llvm::Value *NewPtr,
+                               llvm::Value *NumElements,
                                llvm::Value *AllocSizeWithoutCookie);

   void EmitCXXTemporary(const CXXTemporary *Temporary, QualType TempType,
                         llvm::Value *Ptr);

+  llvm::Value *EmitLifetimeStart(uint64_t Size, llvm::Value *Addr);
+  void EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr);
+
   llvm::Value *EmitCXXNewExpr(const CXXNewExpr *E);
   void EmitCXXDeleteExpr(const CXXDeleteExpr *E);

@@ -1889,8 +1919,8 @@ public:
     llvm::Value *getObjectAddress(CodeGenFunction &CGF) const {
       if (!IsByRef) return Address;

-      return CGF.Builder.CreateStructGEP(Address,
-                                         CGF.getByRefValueLLVMField(Variable),
+      auto F = CGF.getByRefValueLLVMField(Variable);
+      return CGF.Builder.CreateStructGEP(F.first, Address, F.second,
                                          Variable->getNameAsString());
     }
   };
@@ -1988,6 +2018,30 @@ public:
   void EmitCXXTryStmt(const CXXTryStmt &S);
   void EmitSEHTryStmt(const SEHTryStmt &S);
   void EmitSEHLeaveStmt(const SEHLeaveStmt &S);
+  void EnterSEHTryStmt(const SEHTryStmt &S);
+  void ExitSEHTryStmt(const SEHTryStmt &S);
+
+  void startOutlinedSEHHelper(CodeGenFunction &ParentCGF, StringRef Name,
+                              QualType RetTy, FunctionArgList &Args,
+                              const Stmt *OutlinedStmt);
+
+  llvm::Function *GenerateSEHFilterFunction(CodeGenFunction &ParentCGF,
+                                            const SEHExceptStmt &Except);
+
+  llvm::Function *GenerateSEHFinallyFunction(CodeGenFunction &ParentCGF,
+                                             const SEHFinallyStmt &Finally);
+
+  void EmitSEHExceptionCodeSave();
+  llvm::Value *EmitSEHExceptionCode();
+  llvm::Value *EmitSEHExceptionInfo();
+  llvm::Value *EmitSEHAbnormalTermination();
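A small illustration of what these entry points outline. Compiling something like the following for a Windows target with MS extensions produces, besides f itself, a separate filter helper function; because the filter expression reads a local of f, that local is marked escaped with llvm.frameescape in f and recovered with llvm.framerecover inside the helper (the intrinsic names as of this import; they were later renamed). The snippet is a sketch, not the generated code.

static void may_fault() {
  *(volatile int *)0 = 0; // raises an access violation the __except can see
}

int f() {
  volatile int threshold = 42; // referenced by the filter, so it must escape
  __try {
    may_fault();
  } __except (threshold > 0 ? 1 : 0) { // filter runs in an outlined function
    return -1;
  }
  return 0;
}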
+
+  /// Scan the outlined statement for captures from the parent function. For
+  /// each capture, mark the capture as escaped and emit a call to
+  /// llvm.framerecover. Insert the framerecover result into the LocalDeclMap.
+  void EmitCapturedLocals(CodeGenFunction &ParentCGF, const Stmt *OutlinedStmt,
+                          llvm::Value *ParentFP);

   void EmitCXXForRangeStmt(const CXXForRangeStmt &S,
                            ArrayRef<const Attr *> Attrs = None);
@@ -1997,13 +2051,102 @@ public:
   llvm::Function *GenerateCapturedStmtFunctionEpilog(const CapturedStmt &S);
   llvm::Function *GenerateCapturedStmtFunction(const CapturedStmt &S);
   llvm::Value *GenerateCapturedStmtArgument(const CapturedStmt &S);
-  void EmitOMPAggregateAssign(LValue OriginalAddr, llvm::Value *PrivateAddr,
-                              const Expr *AssignExpr, QualType Type,
-                              const VarDecl *VDInit);
-  void EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
+  /// \brief Perform element by element copying of arrays with type \a
+  /// OriginalType from \a SrcAddr to \a DestAddr using copying procedure
+  /// generated by \a CopyGen.
+  ///
+  /// \param DestAddr Address of the destination array.
+  /// \param SrcAddr Address of the source array.
+  /// \param OriginalType Type of destination and source arrays.
+  /// \param CopyGen Copying procedure that copies the value of a single array
+  /// element to another single array element.
+  void EmitOMPAggregateAssign(
+      llvm::Value *DestAddr, llvm::Value *SrcAddr, QualType OriginalType,
+      const llvm::function_ref<void(llvm::Value *, llvm::Value *)> &CopyGen);
+  /// \brief Emit proper copying of data from one variable to another.
+  ///
+  /// \param OriginalType Original type of the copied variables.
+  /// \param DestAddr Destination address.
+  /// \param SrcAddr Source address.
+  /// \param DestVD Destination variable used in \a CopyExpr (for arrays, has
+  /// type of the base array element).
+  /// \param SrcVD Source variable used in \a CopyExpr (for arrays, has type of
+  /// the base array element).
+  /// \param Copy Actual copying expression for copying data from \a SrcVD to
+  /// \a DestVD.
+  void EmitOMPCopy(CodeGenFunction &CGF, QualType OriginalType,
+                   llvm::Value *DestAddr, llvm::Value *SrcAddr,
+                   const VarDecl *DestVD, const VarDecl *SrcVD,
+                   const Expr *Copy);
+  /// \brief Emit atomic update code for constructs: \a X = \a X \a BO \a E or
+  /// \a X = \a E \a BO \a E.
+  ///
+  /// \param X Value to be updated.
+  /// \param E Update value.
+  /// \param BO Binary operation for update operation.
+  /// \param IsXLHSInRHSPart true if \a X is LHS in RHS part of the update
+  /// expression, false otherwise.
+  /// \param AO Atomic ordering of the generated atomic instructions.
+  /// \param CommonGen Code generator for complex expressions that cannot be
+  /// expressed through atomicrmw instruction.
+  /// \returns <true, OldAtomicValue> if simple 'atomicrmw' instruction was
+  /// generated, <false, RValue::get(nullptr)> otherwise.
+  std::pair<bool, RValue> EmitOMPAtomicSimpleUpdateExpr(
+      LValue X, RValue E, BinaryOperatorKind BO, bool IsXLHSInRHSPart,
+      llvm::AtomicOrdering AO, SourceLocation Loc,
+      const llvm::function_ref<RValue(RValue)> &CommonGen);
+  bool EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
                                  OMPPrivateScope &PrivateScope);
   void EmitOMPPrivateClause(const OMPExecutableDirective &D,
                             OMPPrivateScope &PrivateScope);
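The element-by-element contract documented for EmitOMPAggregateAssign can be modeled in isolation: the driver walks both arrays in lockstep and delegates each element copy to a callback, so one loop serves both trivial assignments and user-defined copy code. A self-contained sketch, with std::function standing in for llvm::function_ref and plain pointers for the emitted addresses:

#include <cstddef>
#include <functional>
#include <iostream>

void aggregateAssign(int *DestAddr, const int *SrcAddr, std::size_t N,
                     const std::function<void(int *, const int *)> &CopyGen) {
  for (std::size_t I = 0; I != N; ++I)
    CopyGen(DestAddr + I, SrcAddr + I); // one element at a time
}

int main() {
  int Src[3] = {1, 2, 3}, Dest[3] = {};
  // The "copying procedure" can be anything, e.g. a user-defined operator=.
  aggregateAssign(Dest, Src, 3, [](int *D, const int *S) { *D = *S * 10; });
  std::cout << Dest[0] << " " << Dest[1] << " " << Dest[2] << "\n"; // 10 20 30
}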
+  /// \brief Emit code for copyin clause in \a D directive. The next code is
+  /// generated at the start of outlined functions for directives:
+  /// \code
+  /// threadprivate_var1 = master_threadprivate_var1;
+  /// operator=(threadprivate_var2, master_threadprivate_var2);
+  /// ...
+  /// __kmpc_barrier(&loc, global_tid);
+  /// \endcode
+  ///
+  /// \param D OpenMP directive possibly with 'copyin' clause(s).
+  /// \returns true if at least one copyin variable is found, false otherwise.
+  bool EmitOMPCopyinClause(const OMPExecutableDirective &D);
+  /// \brief Emit initial code for lastprivate variables. If some variable is
+  /// not also firstprivate, then the default initialization is used. Otherwise
+  /// initialization of this variable is performed by EmitOMPFirstprivateClause
+  /// method.
+  ///
+  /// \param D Directive that may have 'lastprivate' clauses.
+  /// \param PrivateScope Private scope for capturing lastprivate variables for
+  /// proper codegen in internal captured statement.
+  ///
+  /// \returns true if there is at least one lastprivate variable, false
+  /// otherwise.
+  bool EmitOMPLastprivateClauseInit(const OMPExecutableDirective &D,
+                                    OMPPrivateScope &PrivateScope);
+  /// \brief Emit final copying of lastprivate values to original variables at
+  /// the end of the worksharing or simd directive.
+  ///
+  /// \param D Directive that has at least one 'lastprivate' clause.
+  /// \param IsLastIterCond Boolean condition that must be set to 'i1 true' if
+  /// it is the last iteration of the loop code in associated directive, or to
+  /// 'i1 false' otherwise.
+  void EmitOMPLastprivateClauseFinal(const OMPExecutableDirective &D,
+                                     llvm::Value *IsLastIterCond);
+  /// \brief Emit initial code for reduction variables. Creates reduction copies
+  /// and initializes them with the values according to the OpenMP standard.
+  ///
+  /// \param D Directive (possibly) with the 'reduction' clause.
+  /// \param PrivateScope Private scope for capturing reduction variables for
+  /// proper codegen in internal captured statement.
+  ///
+  void EmitOMPReductionClauseInit(const OMPExecutableDirective &D,
+                                  OMPPrivateScope &PrivateScope);
+  /// \brief Emit final update of reduction values to original variables at
+  /// the end of the directive.
+  ///
+  /// \param D Directive that has at least one 'reduction' clause.
+  void EmitOMPReductionClauseFinal(const OMPExecutableDirective &D);

   void EmitOMPParallelDirective(const OMPParallelDirective &S);
   void EmitOMPSimdDirective(const OMPSimdDirective &S);
@@ -2027,15 +2170,37 @@ public:
   void EmitOMPTargetDirective(const OMPTargetDirective &S);
   void EmitOMPTeamsDirective(const OMPTeamsDirective &S);

+  /// \brief Emit the inner loop of the worksharing/simd construct.
+  ///
+  /// \param S Directive, for which the inner loop must be emitted.
+  /// \param RequiresCleanup true if the directive has some associated private
+  /// variables.
+  /// \param LoopCond Boolean condition for loop continuation.
+  /// \param IncExpr Increment expression for loop control variable.
+  /// \param BodyGen Generator for the inner body of the inner loop.
+  /// \param PostIncGen Generator for post-increment code (required for ordered
+  /// loop directives).
+  void EmitOMPInnerLoop(
+      const Stmt &S, bool RequiresCleanup, const Expr *LoopCond,
+      const Expr *IncExpr,
+      const llvm::function_ref<void(CodeGenFunction &)> &BodyGen,
+      const llvm::function_ref<void(CodeGenFunction &)> &PostIncGen);
+
 private:

   /// Helpers for the OpenMP loop directives.
   void EmitOMPLoopBody(const OMPLoopDirective &Directive,
                        bool SeparateIter = false);
-  void EmitOMPInnerLoop(const OMPLoopDirective &S, OMPPrivateScope &LoopScope,
-                        bool SeparateIter = false);
   void EmitOMPSimdFinal(const OMPLoopDirective &S);
-  void EmitOMPWorksharingLoop(const OMPLoopDirective &S);
+  /// \brief Emit code for the worksharing loop-based directive.
+  /// \return true if this construct has any lastprivate clause, false
+  /// otherwise.
+  bool EmitOMPWorksharingLoop(const OMPLoopDirective &S);
+  void EmitOMPForOuterLoop(OpenMPScheduleClauseKind ScheduleKind,
+                           const OMPLoopDirective &S,
+                           OMPPrivateScope &LoopScope, bool Ordered,
+                           llvm::Value *LB, llvm::Value *UB, llvm::Value *ST,
+                           llvm::Value *IL, llvm::Value *Chunk);

 public:
@@ -2085,17 +2250,31 @@ public:

   void EmitAtomicInit(Expr *E, LValue lvalue);

+  bool LValueIsSuitableForInlineAtomic(LValue Src);
+  bool typeIsSuitableForInlineAtomic(QualType Ty, bool IsVolatile) const;
+
+  RValue EmitAtomicLoad(LValue LV, SourceLocation SL,
+                        AggValueSlot Slot = AggValueSlot::ignored());
+
   RValue EmitAtomicLoad(LValue lvalue, SourceLocation loc,
+                        llvm::AtomicOrdering AO, bool IsVolatile = false,
                         AggValueSlot slot = AggValueSlot::ignored());

   void EmitAtomicStore(RValue rvalue, LValue lvalue, bool isInit);

-  std::pair<RValue, RValue> EmitAtomicCompareExchange(
+  void EmitAtomicStore(RValue rvalue, LValue lvalue, llvm::AtomicOrdering AO,
+                       bool IsVolatile, bool isInit);
+
+  std::pair<RValue, llvm::Value *> EmitAtomicCompareExchange(
       LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
       llvm::AtomicOrdering Success = llvm::SequentiallyConsistent,
       llvm::AtomicOrdering Failure = llvm::SequentiallyConsistent,
       bool IsWeak = false, AggValueSlot Slot = AggValueSlot::ignored());

+  void EmitAtomicUpdate(LValue LVal, llvm::AtomicOrdering AO,
+                        const llvm::function_ref<RValue(RValue)> &UpdateOp,
+                        bool IsVolatile);
+
   /// EmitToMemory - Change a scalar value from its value
   /// representation to its in-memory representation.
   llvm::Value *EmitToMemory(llvm::Value *Value, QualType Ty);
@@ -2163,8 +2342,8 @@ public:
   /// Emit an l-value for an assignment (simple or compound) of complex type.
   LValue EmitComplexAssignmentLValue(const BinaryOperator *E);
   LValue EmitComplexCompoundAssignmentLValue(const CompoundAssignOperator *E);
-  LValue EmitScalarCompooundAssignWithComplex(const CompoundAssignOperator *E,
-                                              llvm::Value *&Result);
+  LValue EmitScalarCompoundAssignWithComplex(const CompoundAssignOperator *E,
                                              llvm::Value *&Result);

   // Note: only available for agg return types
   LValue EmitBinaryOperatorLValue(const BinaryOperator *E);
@@ -2208,7 +2387,7 @@ public:
       return ConstantEmission(C, false);
     }

-    LLVM_EXPLICIT operator bool() const {
+    explicit operator bool() const {
       return ValueAndIsReference.getOpaqueValue() != nullptr;
     }

@@ -2397,6 +2576,7 @@ public:
   llvm::Value *EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E);
   llvm::Value *EmitPPCBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
   llvm::Value *EmitR600BuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+  llvm::Value *EmitSystemZBuiltinExpr(unsigned BuiltinID, const CallExpr *E);

   llvm::Value *EmitObjCProtocolExpr(const ObjCProtocolExpr *E);
   llvm::Value *EmitObjCStringLiteral(const ObjCStringLiteral *E);
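The EmitAtomicUpdate interface above generalizes the classic compare-exchange retry loop that gets emitted whenever an update cannot be expressed as a single atomicrmw instruction. In portable C++ the generated pattern corresponds to the following sketch (the function name and the scaling operation are illustrative):

#include <atomic>

double atomic_scale(std::atomic<double> &X, double Factor) {
  double Old = X.load(std::memory_order_relaxed);
  double New;
  do {
    New = Old * Factor; // the "UpdateOp" part: an arbitrary computation
    // Retry if X changed between the load and the exchange; on failure,
    // compare_exchange_weak reloads Old with the current value.
  } while (!X.compare_exchange_weak(Old, New));
  return Old;
}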
@@ -2655,7 +2835,7 @@ public:
   /// \brief Create a basic block that will call a handler function in a
   /// sanitizer runtime with the provided arguments, and create a conditional
   /// branch to it.
-  void EmitCheck(ArrayRef<std::pair<llvm::Value *, SanitizerKind>> Checked,
+  void EmitCheck(ArrayRef<std::pair<llvm::Value *, SanitizerMask>> Checked,
                  StringRef CheckName, ArrayRef<llvm::Constant *> StaticArgs,
                  ArrayRef<llvm::Value *> DynamicArgs);

@@ -2714,7 +2894,7 @@ public:
                     CallExpr::const_arg_iterator ArgBeg,
                     CallExpr::const_arg_iterator ArgEnd,
                     const FunctionDecl *CalleeDecl = nullptr,
-                    unsigned ParamsToSkip = 0, bool ForceColumnInfo = false) {
+                    unsigned ParamsToSkip = 0) {
     SmallVector<QualType, 16> ArgTypes;
     CallExpr::const_arg_iterator Arg = ArgBeg;

@@ -2747,15 +2927,14 @@ public:
     for (; Arg != ArgEnd; ++Arg)
       ArgTypes.push_back(getVarArgType(*Arg));

-    EmitCallArgs(Args, ArgTypes, ArgBeg, ArgEnd, CalleeDecl, ParamsToSkip,
-                 ForceColumnInfo);
+    EmitCallArgs(Args, ArgTypes, ArgBeg, ArgEnd, CalleeDecl, ParamsToSkip);
   }

   void EmitCallArgs(CallArgList &Args, ArrayRef<QualType> ArgTypes,
                     CallExpr::const_arg_iterator ArgBeg,
                     CallExpr::const_arg_iterator ArgEnd,
                     const FunctionDecl *CalleeDecl = nullptr,
-                    unsigned ParamsToSkip = 0, bool ForceColumnInfo = false);
+                    unsigned ParamsToSkip = 0);

 private:
   QualType getVarArgType(const Expr *Arg);
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.cpp
index d653130..4005061 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.cpp
@@ -141,12 +141,14 @@ CodeGenModule::CodeGenModule(ASTContext &C, const CodeGenOptions &CGO,
   RRData = new RREntrypoints();

   if (!CodeGenOpts.InstrProfileInput.empty()) {
-    if (std::error_code EC = llvm::IndexedInstrProfReader::create(
-            CodeGenOpts.InstrProfileInput, PGOReader)) {
+    auto ReaderOrErr =
+        llvm::IndexedInstrProfReader::create(CodeGenOpts.InstrProfileInput);
+    if (std::error_code EC = ReaderOrErr.getError()) {
       unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
                                               "Could not read profile: %0");
       getDiags().Report(DiagID) << EC.message();
-    }
+    } else
+      PGOReader = std::move(ReaderOrErr.get());
   }

   // If coverage mapping generation is enabled, create the
@@ -321,6 +323,8 @@ void CodeGenModule::checkAliases() {

 void CodeGenModule::clear() {
   DeferredDeclsToEmit.clear();
+  if (OpenMPRuntime)
+    OpenMPRuntime->clear();
 }

 void InstrProfStats::reportDiagnostics(DiagnosticsEngine &Diags,
@@ -346,6 +350,13 @@ void CodeGenModule::Release() {
   if (ObjCRuntime)
     if (llvm::Function *ObjCInitFunction = ObjCRuntime->ModuleInitFunction())
       AddGlobalCtor(ObjCInitFunction);
+  if (Context.getLangOpts().CUDA && !Context.getLangOpts().CUDAIsDevice &&
+      CUDARuntime) {
+    if (llvm::Function *CudaCtorFunction = CUDARuntime->makeModuleCtorFunction())
+      AddGlobalCtor(CudaCtorFunction);
+    if (llvm::Function *CudaDtorFunction = CUDARuntime->makeModuleDtorFunction())
+      AddGlobalDtor(CudaDtorFunction);
+  }
   if (PGOReader && PGOStats.hasDiagnostics())
     PGOStats.reportDiagnostics(getDiags(), getCodeGenOpts().MainFileName);
   EmitCtorList(GlobalCtors, "llvm.global_ctors");
@@ -741,23 +752,6 @@ void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
   else if (LangOpts.getStackProtector() == LangOptions::SSPReq)
     B.addAttribute(llvm::Attribute::StackProtectReq);

-  // Add sanitizer attributes if function is not blacklisted.
-  if (!isInSanitizerBlacklist(F, D->getLocation())) {
-    // When AddressSanitizer is enabled, set SanitizeAddress attribute
-    // unless __attribute__((no_sanitize_address)) is used.
-    if (LangOpts.Sanitize.has(SanitizerKind::Address) &&
-        !D->hasAttr<NoSanitizeAddressAttr>())
-      B.addAttribute(llvm::Attribute::SanitizeAddress);
-    // Same for ThreadSanitizer and __attribute__((no_sanitize_thread))
-    if (LangOpts.Sanitize.has(SanitizerKind::Thread) &&
-        !D->hasAttr<NoSanitizeThreadAttr>())
-      B.addAttribute(llvm::Attribute::SanitizeThread);
-    // Same for MemorySanitizer and __attribute__((no_sanitize_memory))
-    if (LangOpts.Sanitize.has(SanitizerKind::Memory) &&
-        !D->hasAttr<NoSanitizeMemoryAttr>())
-      B.addAttribute(llvm::Attribute::SanitizeMemory);
-  }
-
   F->addAttributes(llvm::AttributeSet::FunctionIndex,
                    llvm::AttributeSet::get(
                        F->getContext(), llvm::AttributeSet::FunctionIndex, B));
@@ -865,11 +859,10 @@ static void setLinkageAndVisibilityForGV(llvm::GlobalValue *GV,

 void CodeGenModule::SetFunctionAttributes(GlobalDecl GD, llvm::Function *F,
                                           bool IsIncompleteFunction,
                                           bool IsThunk) {
-  if (unsigned IID = F->getIntrinsicID()) {
+  if (llvm::Intrinsic::ID IID = F->getIntrinsicID()) {
     // If this is an intrinsic function, set the function's attributes
     // to the intrinsic's attributes.
-    F->setAttributes(llvm::Intrinsic::getAttributes(getLLVMContext(),
-                                                    (llvm::Intrinsic::ID)IID));
+    F->setAttributes(llvm::Intrinsic::getAttributes(getLLVMContext(), IID));
     return;
   }

@@ -1085,29 +1078,33 @@ void CodeGenModule::EmitDeferred() {
   // previously unused static decl may become used during the generation of code
   // for a static function, iterate until no changes are made.

-  while (true) {
-    if (!DeferredVTables.empty()) {
-      EmitDeferredVTables();
+  if (!DeferredVTables.empty()) {
+    EmitDeferredVTables();

-      // Emitting a v-table doesn't directly cause more v-tables to
-      // become deferred, although it can cause functions to be
-      // emitted that then need those v-tables.
-      assert(DeferredVTables.empty());
-    }
+    // Emitting a v-table doesn't directly cause more v-tables to
+    // become deferred, although it can cause functions to be
+    // emitted that then need those v-tables.
+    assert(DeferredVTables.empty());
+  }
+
+  // Stop if we're out of both deferred v-tables and deferred declarations.
+  if (DeferredDeclsToEmit.empty())
+    return;

-    // Stop if we're out of both deferred v-tables and deferred declarations.
-    if (DeferredDeclsToEmit.empty()) break;
+  // Grab the list of decls to emit. If EmitGlobalDefinition schedules more
+  // work, it will not interfere with this.
+  std::vector<DeferredGlobal> CurDeclsToEmit;
+  CurDeclsToEmit.swap(DeferredDeclsToEmit);

-    DeferredGlobal &G = DeferredDeclsToEmit.back();
+  for (DeferredGlobal &G : CurDeclsToEmit) {
     GlobalDecl D = G.GD;
     llvm::GlobalValue *GV = G.GV;
-    DeferredDeclsToEmit.pop_back();
+    G.GV = nullptr;

     assert(!GV || GV == GetGlobalValue(getMangledName(D)));
     if (!GV)
       GV = GetGlobalValue(getMangledName(D));

     // Check to see if we've already emitted this. This is necessary
     // for a couple of reasons: first, decls can end up in the
     // deferred-decls queue multiple times, and second, decls can end
@@ -1119,6 +1116,14 @@ void CodeGenModule::EmitDeferred() {

     // Otherwise, emit the definition and move on to the next one.
     EmitGlobalDefinition(D, GV);
+
+    // If we found out that we need to emit more decls, do that recursively.
+    // This has the advantage that the decls are emitted in a DFS and related
+    // ones are close together, which is convenient for testing.
+    if (!DeferredVTables.empty() || !DeferredDeclsToEmit.empty()) {
+      EmitDeferred();
+      assert(DeferredVTables.empty() && DeferredDeclsToEmit.empty());
+    }
   }
 }

@@ -1275,6 +1280,8 @@ llvm::Constant *CodeGenModule::GetAddrOfUuidDescriptor(
   auto *GV = new llvm::GlobalVariable(
       getModule(), Init->getType(),
       /*isConstant=*/true, llvm::GlobalValue::LinkOnceODRLinkage, Init, Name);
+  if (supportsCOMDAT())
+    GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));
   return GV;
 }

@@ -1322,7 +1329,7 @@ void CodeGenModule::EmitGlobal(GlobalDecl GD) {

   // If this is CUDA, be selective about which declarations we emit.
   if (LangOpts.CUDA) {
-    if (CodeGenOpts.CUDAIsDevice) {
+    if (LangOpts.CUDAIsDevice) {
       if (!Global->hasAttr<CUDADeviceAttr>() &&
          !Global->hasAttr<CUDAGlobalAttr>() &&
          !Global->hasAttr<CUDAConstantAttr>() &&
@@ -1603,13 +1610,6 @@ CodeGenModule::GetOrCreateLLVMFunction(StringRef MangledName,
       addDeferredDeclToEmit(F, DDI->second);
       DeferredDecls.erase(DDI);

-  // Otherwise, if this is a sized deallocation function, emit a weak
-  // definition
-  // for it at the end of the translation unit.
-  } else if (D && cast<FunctionDecl>(D)
-                      ->getCorrespondingUnsizedGlobalDeallocationFunction()) {
-    addDeferredDeclToEmit(F, GD);
-
   // Otherwise, there are cases we have to worry about where we're
   // using a declaration for which we must emit a definition but where
   // we might not find a top-level definition:
@@ -1766,6 +1766,8 @@ CodeGenModule::GetOrCreateLLVMGlobal(StringRef MangledName,
     // handling.
     GV->setConstant(isTypeConstant(D->getType(), false));

+    GV->setAlignment(getContext().getDeclAlign(D).getQuantity());
+
     setLinkageAndVisibilityForGV(GV, D);

     if (D->getTLSKind()) {
@@ -1829,7 +1831,11 @@ CodeGenModule::CreateOrReplaceCXXRuntimeVariable(StringRef Name,
     OldGV->eraseFromParent();
   }

+  if (supportsCOMDAT() && GV->isWeakForLinker() &&
+      !GV->hasAvailableExternallyLinkage())
+    GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));
+
   return GV;
 }

@@ -1884,7 +1890,7 @@ CharUnits CodeGenModule::GetTargetTypeStoreSize(llvm::Type *Ty) const {

 unsigned CodeGenModule::GetGlobalVarAddressSpace(const VarDecl *D,
                                                  unsigned AddrSpace) {
-  if (LangOpts.CUDA && CodeGenOpts.CUDAIsDevice) {
+  if (LangOpts.CUDA && LangOpts.CUDAIsDevice) {
     if (D->hasAttr<CUDAConstantAttr>())
       AddrSpace = getContext().getTargetAddressSpace(LangAS::cuda_constant);
     else if (D->hasAttr<CUDASharedAttr>())
@@ -1929,6 +1935,38 @@ void CodeGenModule::MaybeHandleStaticInExternC(const SomeDecl *D,
   R.first->second = nullptr;
 }

+static bool shouldBeInCOMDAT(CodeGenModule &CGM, const Decl &D) {
+  if (!CGM.supportsCOMDAT())
+    return false;
+
+  if (D.hasAttr<SelectAnyAttr>())
+    return true;
+
+  GVALinkage Linkage;
+  if (auto *VD = dyn_cast<VarDecl>(&D))
+    Linkage = CGM.getContext().GetGVALinkageForVariable(VD);
+  else
+    Linkage = CGM.getContext().GetGVALinkageForFunction(cast<FunctionDecl>(&D));
+
+  switch (Linkage) {
+  case GVA_Internal:
+  case GVA_AvailableExternally:
+  case GVA_StrongExternal:
+    return false;
+  case GVA_DiscardableODR:
+  case GVA_StrongODR:
+    return true;
+  }
+  llvm_unreachable("No such linkage");
+}
+
+void CodeGenModule::maybeSetTrivialComdat(const Decl &D,
+                                          llvm::GlobalObject &GO) {
+  if (!shouldBeInCOMDAT(*this, D))
+    return;
+  GO.setComdat(TheModule.getOrInsertComdat(GO.getName()));
+}
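shouldBeInCOMDAT reduces to: on targets with COMDAT support, ODR-mergeable (or selectany) definitions get a comdat keyed by their own name, so the linker folds duplicate definitions across translation units. The attachment step is just the following pattern; a sketch against the LLVM C++ API of this era, not a drop-in replacement for the function above:

#include "llvm/IR/GlobalObject.h"
#include "llvm/IR/Module.h"

void setTrivialComdat(llvm::Module &M, llvm::GlobalObject &GO) {
  // getOrInsertComdat creates the comdat with the default "any" selection
  // kind: the linker keeps one copy and discards the rest.
  GO.setComdat(M.getOrInsertComdat(GO.getName()));
}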
 void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) {
   llvm::Constant *Init = nullptr;
   QualType ASTTy = D->getType();
@@ -2072,6 +2110,8 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) {
     setTLSMode(GV, *D);
   }

+  maybeSetTrivialComdat(*D, *GV);
+
   // Emit the initializer function if necessary.
   if (NeedsGlobalCtor || NeedsGlobalDtor)
     EmitCXXGlobalVarDeclInitFunc(D, GV, NeedsGlobalCtor);
@@ -2085,7 +2125,8 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) {
 }

 static bool isVarDeclStrongDefinition(const ASTContext &Context,
-                                      const VarDecl *D, bool NoCommon) {
+                                      CodeGenModule &CGM, const VarDecl *D,
+                                      bool NoCommon) {
   // Don't give variables common linkage if -fno-common was specified unless it
   // was overridden by a NoCommon attribute.
   if ((NoCommon || D->hasAttr<NoCommonAttr>()) && !D->hasAttr<CommonAttr>())
@@ -2110,11 +2151,31 @@ static bool isVarDeclStrongDefinition(const ASTContext &Context,
   if (D->hasAttr<WeakImportAttr>())
     return true;

+  // A variable cannot be both common and exist in a comdat.
+  if (shouldBeInCOMDAT(CGM, *D))
+    return true;
+
   // Declarations with a required alignment do not have common linkage in MSVC
   // mode.
-  if (Context.getLangOpts().MSVCCompat &&
-      (Context.isAlignmentRequired(D->getType()) || D->hasAttr<AlignedAttr>()))
-    return true;
+  if (Context.getLangOpts().MSVCCompat) {
+    if (D->hasAttr<AlignedAttr>())
+      return true;
+    QualType VarType = D->getType();
+    if (Context.isAlignmentRequired(VarType))
+      return true;
+
+    if (const auto *RT = VarType->getAs<RecordType>()) {
+      const RecordDecl *RD = RT->getDecl();
+      for (const FieldDecl *FD : RD->fields()) {
+        if (FD->isBitField())
+          continue;
+        if (FD->hasAttr<AlignedAttr>())
+          return true;
+        if (Context.isAlignmentRequired(FD->getType()))
+          return true;
+      }
+    }
+  }

   return false;
 }
@@ -2162,7 +2223,7 @@ llvm::GlobalValue::LinkageTypes CodeGenModule::getLLVMLinkageForDeclarator(
   // C++ doesn't have tentative definitions and thus cannot have common
   // linkage.
   if (!getLangOpts().CPlusPlus && isa<VarDecl>(D) &&
-      !isVarDeclStrongDefinition(Context, cast<VarDecl>(D),
+      !isVarDeclStrongDefinition(Context, *this, cast<VarDecl>(D),
                                  CodeGenOpts.NoCommon))
     return llvm::GlobalVariable::CommonLinkage;

@@ -2285,7 +2346,7 @@ static void replaceUsesOfNonProtoConstant(llvm::Constant *old,
     callSite->replaceAllUsesWith(newCall.getInstruction());

     // Copy debug location attached to CI.
-    if (!callSite->getDebugLoc().isUnknown())
+    if (callSite->getDebugLoc())
       newCall->setDebugLoc(callSite->getDebugLoc());

     callSite->eraseFromParent();
   }
@@ -2406,6 +2467,8 @@ void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD,

   MaybeHandleStaticInExternC(D, Fn);

+  maybeSetTrivialComdat(*D, *Fn);
+
   CodeGenFunction(*this).GenerateCode(D, Fn, FI);

   setFunctionDefinitionAttributes(D, Fn);
@@ -2449,7 +2512,7 @@ void CodeGenModule::EmitAliasDefinition(GlobalDecl GD) {

   // Create the new alias itself, but don't set a name yet.
   auto *GA = llvm::GlobalAlias::create(
-      cast<llvm::PointerType>(Aliasee->getType())->getElementType(), 0,
+      cast<llvm::PointerType>(Aliasee->getType()),
       llvm::Function::ExternalLinkage, "", Aliasee, &getModule());

   if (Entry) {
@@ -2497,12 +2560,10 @@ llvm::Function *CodeGenModule::getIntrinsic(unsigned IID,
                                          Tys);
 }

-static llvm::StringMapEntry<llvm::Constant*> &
-GetConstantCFStringEntry(llvm::StringMap<llvm::Constant*> &Map,
-                         const StringLiteral *Literal,
-                         bool TargetIsLSB,
-                         bool &IsUTF16,
-                         unsigned &StringLength) {
+static llvm::StringMapEntry<llvm::GlobalVariable *> &
+GetConstantCFStringEntry(llvm::StringMap<llvm::GlobalVariable *> &Map,
+                         const StringLiteral *Literal, bool TargetIsLSB,
+                         bool &IsUTF16, unsigned &StringLength) {
   StringRef String = Literal->getString();
   unsigned NumBytes = String.size();

@@ -2534,10 +2595,9 @@ GetConstantCFStringEntry(llvm::StringMap<llvm::Constant*> &Map,
                          nullptr)).first;
 }

-static llvm::StringMapEntry<llvm::Constant*> &
-GetConstantStringEntry(llvm::StringMap<llvm::Constant*> &Map,
-                       const StringLiteral *Literal,
-                       unsigned &StringLength) {
+static llvm::StringMapEntry<llvm::GlobalVariable *> &
+GetConstantStringEntry(llvm::StringMap<llvm::GlobalVariable *> &Map,
+                       const StringLiteral *Literal, unsigned &StringLength) {
   StringRef String = Literal->getString();
   StringLength = String.size();
   return *Map.insert(std::make_pair(String, nullptr)).first;
@@ -2547,10 +2607,10 @@ llvm::Constant *
 CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
   unsigned StringLength = 0;
   bool isUTF16 = false;
-  llvm::StringMapEntry<llvm::Constant*> &Entry =
-    GetConstantCFStringEntry(CFConstantStringMap, Literal,
-                             getDataLayout().isLittleEndian(),
-                             isUTF16, StringLength);
+  llvm::StringMapEntry<llvm::GlobalVariable *> &Entry =
+      GetConstantCFStringEntry(CFConstantStringMap, Literal,
+                               getDataLayout().isLittleEndian(), isUTF16,
+                               StringLength);

   if (auto *C = Entry.second)
     return C;
@@ -2566,7 +2626,7 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
     llvm::Constant *GV = CreateRuntimeVariable(Ty,
                                            "__CFConstantStringClassReference");
     // Decay array -> ptr
-    V = llvm::ConstantExpr::getGetElementPtr(GV, Zeros);
+    V = llvm::ConstantExpr::getGetElementPtr(Ty, GV, Zeros);
     CFConstantStringClassRef = V;
   }
   else
@@ -2619,7 +2679,8 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
   }

   // String.
-  Fields[2] = llvm::ConstantExpr::getGetElementPtr(GV, Zeros);
+  Fields[2] =
+      llvm::ConstantExpr::getGetElementPtr(GV->getValueType(), GV, Zeros);

   if (isUTF16)
     // Cast the UTF16 string to the correct type.
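The StringMap plumbing above follows a simple memoization pattern: insert a null-valued entry keyed by the literal's bytes, and fill the slot with the created global the first time it is actually needed, so repeated literals share one GlobalVariable. A self-contained model of that pattern; GlobalVarStub and makeGlobal are stand-ins for the real emission code:

#include "llvm/ADT/StringMap.h"
#include <utility>

struct GlobalVarStub {}; // stand-in for llvm::GlobalVariable

GlobalVarStub *getOrEmit(llvm::StringMap<GlobalVarStub *> &Map,
                         llvm::StringRef Key,
                         GlobalVarStub *(*makeGlobal)(llvm::StringRef)) {
  // insert() is a no-op if the key already exists; either way we get the
  // entry back and can test whether its value slot has been filled yet.
  auto &Entry = *Map.insert(std::make_pair(Key, nullptr)).first;
  if (!Entry.second)
    Entry.second = makeGlobal(Entry.first()); // emit once, reuse thereafter
  return Entry.second;
}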
@@ -2640,11 +2701,11 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
   return GV;
 }

-llvm::Constant *
+llvm::GlobalVariable *
 CodeGenModule::GetAddrOfConstantString(const StringLiteral *Literal) {
   unsigned StringLength = 0;
-  llvm::StringMapEntry<llvm::Constant*> &Entry =
-    GetConstantStringEntry(CFConstantStringMap, Literal, StringLength);
+  llvm::StringMapEntry<llvm::GlobalVariable *> &Entry =
+      GetConstantStringEntry(CFConstantStringMap, Literal, StringLength);

   if (auto *C = Entry.second)
     return C;
@@ -2673,11 +2734,10 @@ CodeGenModule::GetAddrOfConstantString(const StringLiteral *Literal) {
       llvm::Type *PTy = llvm::ArrayType::get(Ty, 0);
       GV = CreateRuntimeVariable(PTy, str);
       // Decay array -> ptr
-      V = llvm::ConstantExpr::getGetElementPtr(GV, Zeros);
+      V = llvm::ConstantExpr::getGetElementPtr(PTy, GV, Zeros);
       ConstantStringClassRef = V;
     }
-  }
-  else
+  } else
     V = ConstantStringClassRef;

   if (!NSConstantStringType) {
@@ -2733,8 +2793,9 @@ CodeGenModule::GetAddrOfConstantString(const StringLiteral *Literal) {
   // of the string is via this class initializer.
   CharUnits Align = getContext().getTypeAlignInChars(getContext().CharTy);
   GV->setAlignment(Align.getQuantity());
-  Fields[1] = llvm::ConstantExpr::getGetElementPtr(GV, Zeros);
-
+  Fields[1] =
+      llvm::ConstantExpr::getGetElementPtr(GV->getValueType(), GV, Zeros);
+
   // String length.
   llvm::Type *Ty = getTypes().ConvertType(getContext().UnsignedIntTy);
   Fields[2] = llvm::ConstantInt::get(Ty, StringLength);
@@ -2838,12 +2899,18 @@ GenerateStringLiteral(llvm::Constant *C, llvm::GlobalValue::LinkageTypes LT,
   if (CGM.getLangOpts().OpenCL)
     AddrSpace = CGM.getContext().getTargetAddressSpace(LangAS::opencl_constant);

+  llvm::Module &M = CGM.getModule();
   // Create a global variable for this string
   auto *GV = new llvm::GlobalVariable(
-      CGM.getModule(), C->getType(), !CGM.getLangOpts().WritableStrings, LT, C,
-      GlobalName, nullptr, llvm::GlobalVariable::NotThreadLocal, AddrSpace);
+      M, C->getType(), !CGM.getLangOpts().WritableStrings, LT, C, GlobalName,
+      nullptr, llvm::GlobalVariable::NotThreadLocal, AddrSpace);
   GV->setAlignment(Alignment);
   GV->setUnnamedAddr(true);
+  if (GV->isWeakForLinker()) {
+    assert(CGM.supportsCOMDAT() && "Only COFF uses weak string literals");
+    GV->setComdat(M.getOrInsertComdat(GV->getName()));
+  }
+
   return GV;
 }

@@ -3002,10 +3069,19 @@ llvm::Constant *CodeGenModule::GetAddrOfGlobalTemporary(
   // Create a global variable for this lifetime-extended temporary.
   llvm::GlobalValue::LinkageTypes Linkage =
       getLLVMLinkageVarDefinition(VD, Constant);
-  // There is no need for this temporary to have global linkage if the global
-  // variable has external linkage.
-  if (Linkage == llvm::GlobalVariable::ExternalLinkage)
-    Linkage = llvm::GlobalVariable::PrivateLinkage;
+  if (Linkage == llvm::GlobalVariable::ExternalLinkage) {
+    const VarDecl *InitVD;
+    if (VD->isStaticDataMember() && VD->getAnyInitializer(InitVD) &&
+        isa<CXXRecordDecl>(InitVD->getLexicalDeclContext())) {
+      // Temporaries defined inside a class get linkonce_odr linkage because the
+      // class can be defined in multiple translation units.
+      Linkage = llvm::GlobalVariable::LinkOnceODRLinkage;
+    } else {
+      // There is no need for this temporary to have external linkage if the
+      // VarDecl has external linkage.
+      Linkage = llvm::GlobalVariable::InternalLinkage;
+    }
+  }
   unsigned AddrSpace = GetGlobalVarAddressSpace(
       VD, getContext().getTargetAddressSpace(MaterializedType));
   auto *GV = new llvm::GlobalVariable(
@@ -3015,6 +3091,8 @@ llvm::Constant *CodeGenModule::GetAddrOfGlobalTemporary(
   setGlobalVisibility(GV, VD);
   GV->setAlignment(
       getContext().getTypeAlignInChars(MaterializedType).getQuantity());
+  if (supportsCOMDAT() && GV->isWeakForLinker())
+    GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));
   if (VD->getTLSKind())
     setTLSMode(GV, *VD);
   Slot = GV;
@@ -3268,16 +3346,11 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
     break;

   case Decl::FileScopeAsm: {
+    // File-scope asm is ignored during device-side CUDA compilation.
+    if (LangOpts.CUDA && LangOpts.CUDAIsDevice)
+      break;
     auto *AD = cast<FileScopeAsmDecl>(D);
-    StringRef AsmString = AD->getAsmString()->getString();
-
-    const std::string &S = getModule().getModuleInlineAsm();
-    if (S.empty())
-      getModule().setModuleInlineAsm(AsmString);
-    else if (S.end()[-1] == '\n')
-      getModule().setModuleInlineAsm(S + AsmString.str());
-    else
-      getModule().setModuleInlineAsm(S + '\n' + AsmString.str());
+    getModule().appendModuleInlineAsm(AD->getAsmString()->getString());
     break;
   }

@@ -3285,7 +3358,7 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
     auto *Import = cast<ImportDecl>(D);

     // Ignore import declarations that come from imported modules.
-    if (clang::Module *Owner = Import->getOwningModule()) {
+    if (clang::Module *Owner = Import->getImportedOwningModule()) {
       if (getLangOpts().CurrentModule.empty() ||
           Owner->getTopLevelModule()->Name == getLangOpts().CurrentModule)
         break;
@@ -3357,7 +3430,7 @@ void CodeGenModule::ClearUnusedCoverageMapping(const Decl *D) {

 void CodeGenModule::EmitDeferredUnusedCoverageMappings() {
   std::vector<const Decl *> DeferredDecls;
-  for (const auto I : DeferredEmptyCoverageMappingDecls) {
+  for (const auto &I : DeferredEmptyCoverageMappingDecls) {
     if (!I.second)
       continue;
     DeferredDecls.push_back(I.first);
@@ -3561,6 +3634,12 @@ llvm::Constant *CodeGenModule::EmitUuidofInitializer(StringRef Uuid) {
   return llvm::ConstantStruct::getAnon(Fields);
 }

+llvm::Constant *
+CodeGenModule::getAddrOfCXXCatchHandlerType(QualType Ty,
+                                            QualType CatchHandlerType) {
+  return getCXXABI().getAddrOfCXXCatchHandlerType(Ty, CatchHandlerType);
+}
+
 llvm::Constant *CodeGenModule::GetAddrOfRTTIDescriptor(QualType Ty,
                                                        bool ForEH) {
   // Return a bogus pointer if RTTI is disabled, unless it's for EH.
@@ -3583,11 +3662,8 @@ void CodeGenModule::EmitOMPThreadPrivateDecl(const OMPThreadPrivateDecl *D) {
         VD->getAnyInitializer() &&
         !VD->getAnyInitializer()->isConstantInitializer(getContext(),
                                                         /*ForRef=*/false);
-    if (auto InitFunction =
-            getOpenMPRuntime().EmitOMPThreadPrivateVarDefinition(
-                VD, GetAddrOfGlobalVar(VD), RefExpr->getLocStart(),
-                PerformInit))
+    if (auto InitFunction = getOpenMPRuntime().emitThreadPrivateVarDefinition(
+            VD, GetAddrOfGlobalVar(VD), RefExpr->getLocStart(), PerformInit))
       CXXGlobalInits.push_back(InitFunction);
   }
 }
-
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.h b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.h
index 2aafe7e..feef6c2 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.h
@@ -258,8 +258,8 @@ public:
 /// This class organizes the cross-function state that is used while generating
 /// LLVM code.
 class CodeGenModule : public CodeGenTypeCache {
-  CodeGenModule(const CodeGenModule &) LLVM_DELETED_FUNCTION;
-  void operator=(const CodeGenModule &) LLVM_DELETED_FUNCTION;
+  CodeGenModule(const CodeGenModule &) = delete;
+  void operator=(const CodeGenModule &) = delete;

 public:
   struct Structor {
@@ -366,7 +366,7 @@ private:
   /// Map used to get unique annotation strings.
   llvm::StringMap<llvm::Constant*> AnnotationStrings;

-  llvm::StringMap<llvm::Constant*> CFConstantStringMap;
+  llvm::StringMap<llvm::GlobalVariable *> CFConstantStringMap;

   llvm::DenseMap<llvm::Constant *, llvm::GlobalVariable *> ConstantStringMap;
   llvm::DenseMap<const Decl*, llvm::Constant *> StaticLocalDeclMap;
@@ -400,7 +400,8 @@ private:
   /// When a C++ decl with an initializer is deferred, null is
   /// appended to CXXGlobalInits, and the index of that null is placed
   /// here so that the initializer will be performed in the correct
-  /// order.
+  /// order. Once the decl is emitted, the index is replaced with ~0U to ensure
+  /// that we don't re-emit the initializer.
   llvm::DenseMap<const Decl*, unsigned> DelayedCXXInitPosition;

   typedef std::pair<OrderGlobalInits, llvm::Function*> GlobalInitData;
@@ -606,6 +607,7 @@ public:
   const TargetInfo &getTarget() const { return Target; }
   const llvm::Triple &getTriple() const;
   bool supportsCOMDAT() const;
+  void maybeSetTrivialComdat(const Decl &D, llvm::GlobalObject &GO);

   CGCXXABI &getCXXABI() const { return *ABI; }
   llvm::LLVMContext &getLLVMContext() { return VMContext; }
@@ -718,6 +720,9 @@ public:
   /// Get the address of the RTTI descriptor for the given type.
   llvm::Constant *GetAddrOfRTTIDescriptor(QualType Ty, bool ForEH = false);

+  llvm::Constant *getAddrOfCXXCatchHandlerType(QualType Ty,
+                                               QualType CatchHandlerType);
+
   /// Get the address of a uuid descriptor.
   llvm::Constant *GetAddrOfUuidDescriptor(const CXXUuidofExpr* E);

@@ -782,7 +787,7 @@ public:
   /// Return a pointer to a constant NSString object for the given string. Or a
   /// user defined String object as defined via
   /// -fconstant-string-class=class_name option.
-  llvm::Constant *GetAddrOfConstantString(const StringLiteral *Literal);
+  llvm::GlobalVariable *GetAddrOfConstantString(const StringLiteral *Literal);

   /// Return a constant array for the given string.
   llvm::Constant *GetConstantArrayFromStringLiteral(const StringLiteral *E);
@@ -993,7 +998,7 @@ public:

   void EmitTentativeDefinition(const VarDecl *D);

-  void EmitVTable(CXXRecordDecl *Class, bool DefinitionRequired);
+  void EmitVTable(CXXRecordDecl *Class);

   /// Emit the RTTI descriptors for the builtin types.
   void EmitFundamentalRTTIDescriptors();
@@ -1102,6 +1107,14 @@ public:
   /// \param D Threadprivate declaration.
   void EmitOMPThreadPrivateDecl(const OMPThreadPrivateDecl *D);

+  /// Emit bit set entries for the given vtable using the given layout if
+  /// vptr CFI is enabled.
+  void EmitVTableBitSetEntries(llvm::GlobalVariable *VTable,
+                               const VTableLayout &VTLayout);
+
+  /// \brief Get the declaration of std::terminate for the platform.
+  llvm::Constant *getTerminateFn();
+
 private:
   llvm::Constant *
   GetOrCreateLLVMFunction(StringRef MangledName, llvm::Type *Ty, GlobalDecl D,
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenPGO.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenPGO.cpp
index 24b035d..c972443 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenPGO.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenPGO.cpp
@@ -58,12 +58,16 @@ void CodeGenPGO::setFuncName(llvm::Function *Fn) {
 }

 void CodeGenPGO::createFuncNameVar(llvm::GlobalValue::LinkageTypes Linkage) {
-  // Usually, we want to match the function's linkage, but
-  // available_externally and extern_weak both have the wrong semantics.
+  // We generally want to match the function's linkage, but available_externally
+  // and extern_weak both have the wrong semantics, and anything that doesn't
+  // need to link across compilation units doesn't need to be visible at all.
   if (Linkage == llvm::GlobalValue::ExternalWeakLinkage)
     Linkage = llvm::GlobalValue::LinkOnceAnyLinkage;
   else if (Linkage == llvm::GlobalValue::AvailableExternallyLinkage)
     Linkage = llvm::GlobalValue::LinkOnceODRLinkage;
+  else if (Linkage == llvm::GlobalValue::InternalLinkage ||
+           Linkage == llvm::GlobalValue::ExternalLinkage)
+    Linkage = llvm::GlobalValue::PrivateLinkage;

   auto *Value =
       llvm::ConstantDataArray::getString(CGM.getLLVMContext(), FuncName, false);
@@ -138,482 +142,469 @@ const int PGOHash::NumBitsPerType;
 const unsigned PGOHash::NumTypesPerWord;
 const unsigned PGOHash::TooBig;

-  /// A RecursiveASTVisitor that fills a map of statements to PGO counters.
-  struct MapRegionCounters : public RecursiveASTVisitor<MapRegionCounters> {
-    /// The next counter value to assign.
-    unsigned NextCounter;
-    /// The function hash.
-    PGOHash Hash;
-    /// The map of statements to counters.
-    llvm::DenseMap<const Stmt *, unsigned> &CounterMap;
-
-    MapRegionCounters(llvm::DenseMap<const Stmt *, unsigned> &CounterMap)
-        : NextCounter(0), CounterMap(CounterMap) {}
-
-    // Blocks and lambdas are handled as separate functions, so we need not
-    // traverse them in the parent context.
-    bool TraverseBlockExpr(BlockExpr *BE) { return true; }
-    bool TraverseLambdaBody(LambdaExpr *LE) { return true; }
-    bool TraverseCapturedStmt(CapturedStmt *CS) { return true; }
-
-    bool VisitDecl(const Decl *D) {
-      switch (D->getKind()) {
-      default:
-        break;
-      case Decl::Function:
-      case Decl::CXXMethod:
-      case Decl::CXXConstructor:
-      case Decl::CXXDestructor:
-      case Decl::CXXConversion:
-      case Decl::ObjCMethod:
-      case Decl::Block:
-      case Decl::Captured:
-        CounterMap[D->getBody()] = NextCounter++;
-        break;
-      }
-      return true;
+/// A RecursiveASTVisitor that fills a map of statements to PGO counters.
+struct MapRegionCounters : public RecursiveASTVisitor<MapRegionCounters> {
+  /// The next counter value to assign.
+  unsigned NextCounter;
+  /// The function hash.
+  PGOHash Hash;
+  /// The map of statements to counters.
+  llvm::DenseMap<const Stmt *, unsigned> &CounterMap;
+
+  MapRegionCounters(llvm::DenseMap<const Stmt *, unsigned> &CounterMap)
+      : NextCounter(0), CounterMap(CounterMap) {}
+
+  // Blocks and lambdas are handled as separate functions, so we need not
+  // traverse them in the parent context.
+  bool TraverseBlockExpr(BlockExpr *BE) { return true; }
+  bool TraverseLambdaBody(LambdaExpr *LE) { return true; }
+  bool TraverseCapturedStmt(CapturedStmt *CS) { return true; }
+
+  bool VisitDecl(const Decl *D) {
+    switch (D->getKind()) {
+    default:
+      break;
+    case Decl::Function:
+    case Decl::CXXMethod:
+    case Decl::CXXConstructor:
+    case Decl::CXXDestructor:
+    case Decl::CXXConversion:
+    case Decl::ObjCMethod:
+    case Decl::Block:
+    case Decl::Captured:
+      CounterMap[D->getBody()] = NextCounter++;
+      break;
     }
+    return true;
+  }

-    bool VisitStmt(const Stmt *S) {
-      auto Type = getHashType(S);
-      if (Type == PGOHash::None)
-        return true;
-
-      CounterMap[S] = NextCounter++;
-      Hash.combine(Type);
+  bool VisitStmt(const Stmt *S) {
+    auto Type = getHashType(S);
+    if (Type == PGOHash::None)
       return true;
+
+    CounterMap[S] = NextCounter++;
+    Hash.combine(Type);
+    return true;
+  }

+  PGOHash::HashType getHashType(const Stmt *S) {
+    switch (S->getStmtClass()) {
+    default:
+      break;
+    case Stmt::LabelStmtClass:
+      return PGOHash::LabelStmt;
+    case Stmt::WhileStmtClass:
+      return PGOHash::WhileStmt;
+    case Stmt::DoStmtClass:
+      return PGOHash::DoStmt;
+    case Stmt::ForStmtClass:
+      return PGOHash::ForStmt;
+    case Stmt::CXXForRangeStmtClass:
+      return PGOHash::CXXForRangeStmt;
+    case Stmt::ObjCForCollectionStmtClass:
+      return PGOHash::ObjCForCollectionStmt;
+    case Stmt::SwitchStmtClass:
+      return PGOHash::SwitchStmt;
+    case Stmt::CaseStmtClass:
+      return PGOHash::CaseStmt;
+    case Stmt::DefaultStmtClass:
+      return PGOHash::DefaultStmt;
+    case Stmt::IfStmtClass:
+      return PGOHash::IfStmt;
+    case Stmt::CXXTryStmtClass:
+      return PGOHash::CXXTryStmt;
+    case Stmt::CXXCatchStmtClass:
+      return PGOHash::CXXCatchStmt;
+    case Stmt::ConditionalOperatorClass:
+      return PGOHash::ConditionalOperator;
+    case Stmt::BinaryConditionalOperatorClass:
+      return PGOHash::BinaryConditionalOperator;
+    case Stmt::BinaryOperatorClass: {
+      const BinaryOperator *BO = cast<BinaryOperator>(S);
+      if (BO->getOpcode() == BO_LAnd)
+        return PGOHash::BinaryOperatorLAnd;
+      if (BO->getOpcode() == BO_LOr)
+        return PGOHash::BinaryOperatorLOr;
+      break;
     }
-    PGOHash::HashType getHashType(const Stmt *S) {
-      switch (S->getStmtClass()) {
-      default:
-        break;
-      case Stmt::LabelStmtClass:
-        return PGOHash::LabelStmt;
-      case Stmt::WhileStmtClass:
-        return PGOHash::WhileStmt;
-      case Stmt::DoStmtClass:
-        return PGOHash::DoStmt;
-      case Stmt::ForStmtClass:
-        return PGOHash::ForStmt;
-      case Stmt::CXXForRangeStmtClass:
-        return PGOHash::CXXForRangeStmt;
-      case Stmt::ObjCForCollectionStmtClass:
-        return PGOHash::ObjCForCollectionStmt;
-      case Stmt::SwitchStmtClass:
-        return PGOHash::SwitchStmt;
-      case Stmt::CaseStmtClass:
-        return PGOHash::CaseStmt;
-      case Stmt::DefaultStmtClass:
-        return PGOHash::DefaultStmt;
-      case Stmt::IfStmtClass:
-        return PGOHash::IfStmt;
-      case Stmt::CXXTryStmtClass:
-        return PGOHash::CXXTryStmt;
-      case Stmt::CXXCatchStmtClass:
-        return PGOHash::CXXCatchStmt;
-      case Stmt::ConditionalOperatorClass:
-        return PGOHash::ConditionalOperator;
-      case Stmt::BinaryConditionalOperatorClass:
-        return PGOHash::BinaryConditionalOperator;
-      case Stmt::BinaryOperatorClass: {
-        const BinaryOperator *BO = cast<BinaryOperator>(S);
-        if (BO->getOpcode() == BO_LAnd)
-          return PGOHash::BinaryOperatorLAnd;
-        if (BO->getOpcode() == BO_LOr)
-          return PGOHash::BinaryOperatorLOr;
-        break;
-      }
-      }
-      return PGOHash::None;
-    }
+    }
+    return PGOHash::None;
+  }
+};
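For a concrete picture of MapRegionCounters, here is the counter assignment it would produce for a small function: the body gets counter 0 through VisitDecl, and each statement whose getHashType is not None gets the next counter in traversal order, with those hash types folded into the function hash. The function itself is an arbitrary illustration:

// Counter assignment per the visitor above (annotations are the point here):
int count_odds(const int *A, int N) { // function body -> counter 0
  int Odds = 0;
  for (int I = 0; I < N; ++I)         // ForStmt      -> counter 1
    if (A[I] % 2)                     // IfStmt       -> counter 2
      ++Odds;
  return Odds;
} // the function hash combines {ForStmt, IfStmt}, in this order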
+/// A StmtVisitor that propagates the raw counts through the AST and
+/// records the count at statements where the value may change.
+struct ComputeRegionCounts : public ConstStmtVisitor<ComputeRegionCounts> {
+  /// PGO state.
+  CodeGenPGO &PGO;
+
+  /// A flag that is set when the current count should be recorded on the
+  /// next statement, such as at the exit of a loop.
+  bool RecordNextStmtCount;
+
+  /// The count at the current location in the traversal.
+  uint64_t CurrentCount;
+
+  /// The map of statements to count values.
+  llvm::DenseMap<const Stmt *, uint64_t> &CountMap;
+
+  /// BreakContinueStack - Keep counts of breaks and continues inside loops.
+  struct BreakContinue {
+    uint64_t BreakCount;
+    uint64_t ContinueCount;
+    BreakContinue() : BreakCount(0), ContinueCount(0) {}
+  };
+  SmallVector<BreakContinue, 8> BreakContinueStack;

-  /// A StmtVisitor that propagates the raw counts through the AST and
-  /// records the count at statements where the value may change.
-  struct ComputeRegionCounts : public ConstStmtVisitor<ComputeRegionCounts> {
-    /// PGO state.
-    CodeGenPGO &PGO;
-
-    /// A flag that is set when the current count should be recorded on the
-    /// next statement, such as at the exit of a loop.
-    bool RecordNextStmtCount;
-
-    /// The map of statements to count values.
-    llvm::DenseMap<const Stmt *, uint64_t> &CountMap;
-
-    /// BreakContinueStack - Keep counts of breaks and continues inside loops.
-    struct BreakContinue {
-      uint64_t BreakCount;
-      uint64_t ContinueCount;
-      BreakContinue() : BreakCount(0), ContinueCount(0) {}
-    };
-    SmallVector<BreakContinue, 8> BreakContinueStack;
-
-    ComputeRegionCounts(llvm::DenseMap<const Stmt *, uint64_t> &CountMap,
-                        CodeGenPGO &PGO)
-        : PGO(PGO), RecordNextStmtCount(false), CountMap(CountMap) {}
-
-    void RecordStmtCount(const Stmt *S) {
-      if (RecordNextStmtCount) {
-        CountMap[S] = PGO.getCurrentRegionCount();
-        RecordNextStmtCount = false;
-      }
-    }
+  ComputeRegionCounts(llvm::DenseMap<const Stmt *, uint64_t> &CountMap,
+                      CodeGenPGO &PGO)
+      : PGO(PGO), RecordNextStmtCount(false), CountMap(CountMap) {}

-    void VisitStmt(const Stmt *S) {
-      RecordStmtCount(S);
-      for (Stmt::const_child_range I = S->children(); I; ++I) {
-        if (*I)
-          this->Visit(*I);
-      }
+  void RecordStmtCount(const Stmt *S) {
+    if (RecordNextStmtCount) {
+      CountMap[S] = CurrentCount;
+      RecordNextStmtCount = false;
     }
+  }

-    void VisitFunctionDecl(const FunctionDecl *D) {
-      // Counter tracks entry to the function body.
-      RegionCounter Cnt(PGO, D->getBody());
-      Cnt.beginRegion();
-      CountMap[D->getBody()] = PGO.getCurrentRegionCount();
-      Visit(D->getBody());
-    }
+  /// Set and return the current count.
+  uint64_t setCount(uint64_t Count) {
+    CurrentCount = Count;
+    return Count;
+  }

-    // Skip lambda expressions. We visit these as FunctionDecls when we're
-    // generating them and aren't interested in the body when generating a
-    // parent context.
-    void VisitLambdaExpr(const LambdaExpr *LE) {}
-
-    void VisitCapturedDecl(const CapturedDecl *D) {
-      // Counter tracks entry to the capture body.
-      RegionCounter Cnt(PGO, D->getBody());
-      Cnt.beginRegion();
-      CountMap[D->getBody()] = PGO.getCurrentRegionCount();
-      Visit(D->getBody());
+  void VisitStmt(const Stmt *S) {
+    RecordStmtCount(S);
+    for (Stmt::const_child_range I = S->children(); I; ++I) {
+      if (*I)
+        this->Visit(*I);
     }
+  }

-    void VisitObjCMethodDecl(const ObjCMethodDecl *D) {
-      // Counter tracks entry to the method body.
-      RegionCounter Cnt(PGO, D->getBody());
-      Cnt.beginRegion();
-      CountMap[D->getBody()] = PGO.getCurrentRegionCount();
-      Visit(D->getBody());
-    }
+  void VisitFunctionDecl(const FunctionDecl *D) {
+    // Counter tracks entry to the function body.
+    uint64_t BodyCount = setCount(PGO.getRegionCount(D->getBody()));
+    CountMap[D->getBody()] = BodyCount;
+    Visit(D->getBody());
+  }

-    void VisitBlockDecl(const BlockDecl *D) {
-      // Counter tracks entry to the block body.
-      RegionCounter Cnt(PGO, D->getBody());
-      Cnt.beginRegion();
-      CountMap[D->getBody()] = PGO.getCurrentRegionCount();
-      Visit(D->getBody());
-    }
+  // Skip lambda expressions. We visit these as FunctionDecls when we're
+  // generating them and aren't interested in the body when generating a
+  // parent context.
+  void VisitLambdaExpr(const LambdaExpr *LE) {}

-    void VisitReturnStmt(const ReturnStmt *S) {
-      RecordStmtCount(S);
-      if (S->getRetValue())
-        Visit(S->getRetValue());
-      PGO.setCurrentRegionUnreachable();
-      RecordNextStmtCount = true;
-    }
+  void VisitCapturedDecl(const CapturedDecl *D) {
+    // Counter tracks entry to the capture body.
+    uint64_t BodyCount = setCount(PGO.getRegionCount(D->getBody()));
+    CountMap[D->getBody()] = BodyCount;
+    Visit(D->getBody());
+  }

-    void VisitGotoStmt(const GotoStmt *S) {
-      RecordStmtCount(S);
-      PGO.setCurrentRegionUnreachable();
-      RecordNextStmtCount = true;
-    }
+  void VisitObjCMethodDecl(const ObjCMethodDecl *D) {
+    // Counter tracks entry to the method body.
+    uint64_t BodyCount = setCount(PGO.getRegionCount(D->getBody()));
+    CountMap[D->getBody()] = BodyCount;
+    Visit(D->getBody());
+  }

-    void VisitLabelStmt(const LabelStmt *S) {
-      RecordNextStmtCount = false;
-      // Counter tracks the block following the label.
-      RegionCounter Cnt(PGO, S);
-      Cnt.beginRegion();
-      CountMap[S] = PGO.getCurrentRegionCount();
-      Visit(S->getSubStmt());
-    }
+  void VisitBlockDecl(const BlockDecl *D) {
+    // Counter tracks entry to the block body.
+    uint64_t BodyCount = setCount(PGO.getRegionCount(D->getBody()));
+    CountMap[D->getBody()] = BodyCount;
+    Visit(D->getBody());
+  }

-    void VisitBreakStmt(const BreakStmt *S) {
-      RecordStmtCount(S);
-      assert(!BreakContinueStack.empty() && "break not in a loop or switch!");
-      BreakContinueStack.back().BreakCount += PGO.getCurrentRegionCount();
-      PGO.setCurrentRegionUnreachable();
-      RecordNextStmtCount = true;
-    }
+  void VisitReturnStmt(const ReturnStmt *S) {
+    RecordStmtCount(S);
+    if (S->getRetValue())
+      Visit(S->getRetValue());
+    CurrentCount = 0;
+    RecordNextStmtCount = true;
+  }

-    void VisitContinueStmt(const ContinueStmt *S) {
-      RecordStmtCount(S);
-      assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");
-      BreakContinueStack.back().ContinueCount += PGO.getCurrentRegionCount();
-      PGO.setCurrentRegionUnreachable();
-      RecordNextStmtCount = true;
-    }
+  void VisitCXXThrowExpr(const CXXThrowExpr *E) {
+    RecordStmtCount(E);
+    if (E->getSubExpr())
+      Visit(E->getSubExpr());
+    CurrentCount = 0;
+    RecordNextStmtCount = true;
+  }

-    void VisitWhileStmt(const WhileStmt *S) {
-      RecordStmtCount(S);
-      // Counter tracks the body of the loop.
-      RegionCounter Cnt(PGO, S);
-      BreakContinueStack.push_back(BreakContinue());
-      // Visit the body region first so the break/continue adjustments can be
-      // included when visiting the condition.
-      Cnt.beginRegion();
-      CountMap[S->getBody()] = PGO.getCurrentRegionCount();
-      Visit(S->getBody());
-      Cnt.adjustForControlFlow();
-
-      // ...then go back and propagate counts through the condition. The count
-      // at the start of the condition is the sum of the incoming edges,
-      // the backedge from the end of the loop body, and the edges from
-      // continue statements.
-      BreakContinue BC = BreakContinueStack.pop_back_val();
-      Cnt.setCurrentRegionCount(Cnt.getParentCount() +
-                                Cnt.getAdjustedCount() + BC.ContinueCount);
-      CountMap[S->getCond()] = PGO.getCurrentRegionCount();
-      Visit(S->getCond());
-      Cnt.adjustForControlFlow();
-      Cnt.applyAdjustmentsToRegion(BC.BreakCount + BC.ContinueCount);
-      RecordNextStmtCount = true;
-    }
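The count propagation spelled out in the comments above, and made explicit by the new VisitWhileStmt further down, is easiest to see with numbers. The figures here are an illustrative scenario, not profile data from the source:

// Suppose a while loop is entered 3 times (ParentCount = 3), its body runs
// 10 times in total (BodyCount = 10, the loop's own counter), and while
// visiting the body we record 2 continues and 1 break.
//
//   BackedgeCount = count left at the end of the body
//                 = 10 - 2 (continue) - 1 (break) = 7
//   CondCount     = ParentCount + BackedgeCount + ContinueCount
//                 = 3 + 7 + 2 = 12     // the condition is evaluated 12 times
//   exit count    = BreakCount + CondCount - BodyCount
//                 = 1 + 12 - 10 = 3    // every entry eventually leaves the loop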
The count - // at the start of the condition is the sum of the incoming edges, - // the backedge from the end of the loop body, and the edges from - // continue statements. - BreakContinue BC = BreakContinueStack.pop_back_val(); - Cnt.setCurrentRegionCount(Cnt.getParentCount() + - Cnt.getAdjustedCount() + BC.ContinueCount); - CountMap[S->getCond()] = PGO.getCurrentRegionCount(); - Visit(S->getCond()); - Cnt.adjustForControlFlow(); - Cnt.applyAdjustmentsToRegion(BC.BreakCount + BC.ContinueCount); - RecordNextStmtCount = true; - } + void VisitGotoStmt(const GotoStmt *S) { + RecordStmtCount(S); + CurrentCount = 0; + RecordNextStmtCount = true; + } - void VisitDoStmt(const DoStmt *S) { - RecordStmtCount(S); - // Counter tracks the body of the loop. - RegionCounter Cnt(PGO, S); - BreakContinueStack.push_back(BreakContinue()); - Cnt.beginRegion(/*AddIncomingFallThrough=*/true); - CountMap[S->getBody()] = PGO.getCurrentRegionCount(); - Visit(S->getBody()); - Cnt.adjustForControlFlow(); - - BreakContinue BC = BreakContinueStack.pop_back_val(); - // The count at the start of the condition is equal to the count at the - // end of the body. The adjusted count does not include either the - // fall-through count coming into the loop or the continue count, so add - // both of those separately. This is coincidentally the same equation as - // with while loops but for different reasons. - Cnt.setCurrentRegionCount(Cnt.getParentCount() + - Cnt.getAdjustedCount() + BC.ContinueCount); - CountMap[S->getCond()] = PGO.getCurrentRegionCount(); - Visit(S->getCond()); - Cnt.adjustForControlFlow(); - Cnt.applyAdjustmentsToRegion(BC.BreakCount + BC.ContinueCount); - RecordNextStmtCount = true; - } + void VisitLabelStmt(const LabelStmt *S) { + RecordNextStmtCount = false; + // Counter tracks the block following the label. + uint64_t BlockCount = setCount(PGO.getRegionCount(S)); + CountMap[S] = BlockCount; + Visit(S->getSubStmt()); + } - void VisitForStmt(const ForStmt *S) { - RecordStmtCount(S); - if (S->getInit()) - Visit(S->getInit()); - // Counter tracks the body of the loop. - RegionCounter Cnt(PGO, S); - BreakContinueStack.push_back(BreakContinue()); - // Visit the body region first. (This is basically the same as a while - // loop; see further comments in VisitWhileStmt.) - Cnt.beginRegion(); - CountMap[S->getBody()] = PGO.getCurrentRegionCount(); - Visit(S->getBody()); - Cnt.adjustForControlFlow(); - - // The increment is essentially part of the body but it needs to include - // the count for all the continue statements. - if (S->getInc()) { - Cnt.setCurrentRegionCount(PGO.getCurrentRegionCount() + - BreakContinueStack.back().ContinueCount); - CountMap[S->getInc()] = PGO.getCurrentRegionCount(); - Visit(S->getInc()); - Cnt.adjustForControlFlow(); - } - - BreakContinue BC = BreakContinueStack.pop_back_val(); - - // ...then go back and propagate counts through the condition. 
- if (S->getCond()) { - Cnt.setCurrentRegionCount(Cnt.getParentCount() + - Cnt.getAdjustedCount() + - BC.ContinueCount); - CountMap[S->getCond()] = PGO.getCurrentRegionCount(); - Visit(S->getCond()); - Cnt.adjustForControlFlow(); - } - Cnt.applyAdjustmentsToRegion(BC.BreakCount + BC.ContinueCount); - RecordNextStmtCount = true; - } + void VisitBreakStmt(const BreakStmt *S) { + RecordStmtCount(S); + assert(!BreakContinueStack.empty() && "break not in a loop or switch!"); + BreakContinueStack.back().BreakCount += CurrentCount; + CurrentCount = 0; + RecordNextStmtCount = true; + } - void VisitCXXForRangeStmt(const CXXForRangeStmt *S) { - RecordStmtCount(S); - Visit(S->getRangeStmt()); - Visit(S->getBeginEndStmt()); - // Counter tracks the body of the loop. - RegionCounter Cnt(PGO, S); - BreakContinueStack.push_back(BreakContinue()); - // Visit the body region first. (This is basically the same as a while - // loop; see further comments in VisitWhileStmt.) - Cnt.beginRegion(); - CountMap[S->getLoopVarStmt()] = PGO.getCurrentRegionCount(); - Visit(S->getLoopVarStmt()); - Visit(S->getBody()); - Cnt.adjustForControlFlow(); - - // The increment is essentially part of the body but it needs to include - // the count for all the continue statements. - Cnt.setCurrentRegionCount(PGO.getCurrentRegionCount() + - BreakContinueStack.back().ContinueCount); - CountMap[S->getInc()] = PGO.getCurrentRegionCount(); - Visit(S->getInc()); - Cnt.adjustForControlFlow(); + void VisitContinueStmt(const ContinueStmt *S) { + RecordStmtCount(S); + assert(!BreakContinueStack.empty() && "continue stmt not in a loop!"); + BreakContinueStack.back().ContinueCount += CurrentCount; + CurrentCount = 0; + RecordNextStmtCount = true; + } - BreakContinue BC = BreakContinueStack.pop_back_val(); + void VisitWhileStmt(const WhileStmt *S) { + RecordStmtCount(S); + uint64_t ParentCount = CurrentCount; + + BreakContinueStack.push_back(BreakContinue()); + // Visit the body region first so the break/continue adjustments can be + // included when visiting the condition. + uint64_t BodyCount = setCount(PGO.getRegionCount(S)); + CountMap[S->getBody()] = CurrentCount; + Visit(S->getBody()); + uint64_t BackedgeCount = CurrentCount; + + // ...then go back and propagate counts through the condition. The count + // at the start of the condition is the sum of the incoming edges, + // the backedge from the end of the loop body, and the edges from + // continue statements. + BreakContinue BC = BreakContinueStack.pop_back_val(); + uint64_t CondCount = + setCount(ParentCount + BackedgeCount + BC.ContinueCount); + CountMap[S->getCond()] = CondCount; + Visit(S->getCond()); + setCount(BC.BreakCount + CondCount - BodyCount); + RecordNextStmtCount = true; + } - // ...then go back and propagate counts through the condition. - Cnt.setCurrentRegionCount(Cnt.getParentCount() + - Cnt.getAdjustedCount() + - BC.ContinueCount); - CountMap[S->getCond()] = PGO.getCurrentRegionCount(); - Visit(S->getCond()); - Cnt.adjustForControlFlow(); - Cnt.applyAdjustmentsToRegion(BC.BreakCount + BC.ContinueCount); - RecordNextStmtCount = true; - } + void VisitDoStmt(const DoStmt *S) { + RecordStmtCount(S); + uint64_t LoopCount = PGO.getRegionCount(S); + + BreakContinueStack.push_back(BreakContinue()); + // The count doesn't include the fallthrough from the parent scope. Add it. 
+ uint64_t BodyCount = setCount(LoopCount + CurrentCount); + CountMap[S->getBody()] = BodyCount; + Visit(S->getBody()); + uint64_t BackedgeCount = CurrentCount; + + BreakContinue BC = BreakContinueStack.pop_back_val(); + // The count at the start of the condition is equal to the count at the + // end of the body, plus any continues. + uint64_t CondCount = setCount(BackedgeCount + BC.ContinueCount); + CountMap[S->getCond()] = CondCount; + Visit(S->getCond()); + setCount(BC.BreakCount + CondCount - LoopCount); + RecordNextStmtCount = true; + } - void VisitObjCForCollectionStmt(const ObjCForCollectionStmt *S) { - RecordStmtCount(S); - Visit(S->getElement()); - // Counter tracks the body of the loop. - RegionCounter Cnt(PGO, S); - BreakContinueStack.push_back(BreakContinue()); - Cnt.beginRegion(); - CountMap[S->getBody()] = PGO.getCurrentRegionCount(); - Visit(S->getBody()); - BreakContinue BC = BreakContinueStack.pop_back_val(); - Cnt.adjustForControlFlow(); - Cnt.applyAdjustmentsToRegion(BC.BreakCount + BC.ContinueCount); - RecordNextStmtCount = true; + void VisitForStmt(const ForStmt *S) { + RecordStmtCount(S); + if (S->getInit()) + Visit(S->getInit()); + + uint64_t ParentCount = CurrentCount; + + BreakContinueStack.push_back(BreakContinue()); + // Visit the body region first. (This is basically the same as a while + // loop; see further comments in VisitWhileStmt.) + uint64_t BodyCount = setCount(PGO.getRegionCount(S)); + CountMap[S->getBody()] = BodyCount; + Visit(S->getBody()); + uint64_t BackedgeCount = CurrentCount; + BreakContinue BC = BreakContinueStack.pop_back_val(); + + // The increment is essentially part of the body but it needs to include + // the count for all the continue statements. + if (S->getInc()) { + uint64_t IncCount = setCount(BackedgeCount + BC.ContinueCount); + CountMap[S->getInc()] = IncCount; + Visit(S->getInc()); } - void VisitSwitchStmt(const SwitchStmt *S) { - RecordStmtCount(S); + // ...then go back and propagate counts through the condition. + uint64_t CondCount = + setCount(ParentCount + BackedgeCount + BC.ContinueCount); + if (S->getCond()) { + CountMap[S->getCond()] = CondCount; Visit(S->getCond()); - PGO.setCurrentRegionUnreachable(); - BreakContinueStack.push_back(BreakContinue()); - Visit(S->getBody()); - // If the switch is inside a loop, add the continue counts. - BreakContinue BC = BreakContinueStack.pop_back_val(); - if (!BreakContinueStack.empty()) - BreakContinueStack.back().ContinueCount += BC.ContinueCount; - // Counter tracks the exit block of the switch. - RegionCounter ExitCnt(PGO, S); - ExitCnt.beginRegion(); - RecordNextStmtCount = true; } + setCount(BC.BreakCount + CondCount - BodyCount); + RecordNextStmtCount = true; + } - void VisitCaseStmt(const CaseStmt *S) { - RecordNextStmtCount = false; - // Counter for this particular case. This counts only jumps from the - // switch header and does not include fallthrough from the case before - // this one. - RegionCounter Cnt(PGO, S); - Cnt.beginRegion(/*AddIncomingFallThrough=*/true); - CountMap[S] = Cnt.getCount(); - RecordNextStmtCount = true; - Visit(S->getSubStmt()); - } + void VisitCXXForRangeStmt(const CXXForRangeStmt *S) { + RecordStmtCount(S); + Visit(S->getLoopVarStmt()); + Visit(S->getRangeStmt()); + Visit(S->getBeginEndStmt()); + + uint64_t ParentCount = CurrentCount; + BreakContinueStack.push_back(BreakContinue()); + // Visit the body region first. (This is basically the same as a while + // loop; see further comments in VisitWhileStmt.) 
+ uint64_t BodyCount = setCount(PGO.getRegionCount(S)); + CountMap[S->getBody()] = BodyCount; + Visit(S->getBody()); + uint64_t BackedgeCount = CurrentCount; + BreakContinue BC = BreakContinueStack.pop_back_val(); + + // The increment is essentially part of the body but it needs to include + // the count for all the continue statements. + uint64_t IncCount = setCount(BackedgeCount + BC.ContinueCount); + CountMap[S->getInc()] = IncCount; + Visit(S->getInc()); + + // ...then go back and propagate counts through the condition. + uint64_t CondCount = + setCount(ParentCount + BackedgeCount + BC.ContinueCount); + CountMap[S->getCond()] = CondCount; + Visit(S->getCond()); + setCount(BC.BreakCount + CondCount - BodyCount); + RecordNextStmtCount = true; + } - void VisitDefaultStmt(const DefaultStmt *S) { - RecordNextStmtCount = false; - // Counter for this default case. This does not include fallthrough from - // the previous case. - RegionCounter Cnt(PGO, S); - Cnt.beginRegion(/*AddIncomingFallThrough=*/true); - CountMap[S] = Cnt.getCount(); - RecordNextStmtCount = true; - Visit(S->getSubStmt()); - } + void VisitObjCForCollectionStmt(const ObjCForCollectionStmt *S) { + RecordStmtCount(S); + Visit(S->getElement()); + uint64_t ParentCount = CurrentCount; + BreakContinueStack.push_back(BreakContinue()); + // Counter tracks the body of the loop. + uint64_t BodyCount = setCount(PGO.getRegionCount(S)); + CountMap[S->getBody()] = BodyCount; + Visit(S->getBody()); + uint64_t BackedgeCount = CurrentCount; + BreakContinue BC = BreakContinueStack.pop_back_val(); + + setCount(BC.BreakCount + ParentCount + BackedgeCount + BC.ContinueCount - + BodyCount); + RecordNextStmtCount = true; + } - void VisitIfStmt(const IfStmt *S) { - RecordStmtCount(S); - // Counter tracks the "then" part of an if statement. The count for - // the "else" part, if it exists, will be calculated from this counter. - RegionCounter Cnt(PGO, S); - Visit(S->getCond()); + void VisitSwitchStmt(const SwitchStmt *S) { + RecordStmtCount(S); + Visit(S->getCond()); + CurrentCount = 0; + BreakContinueStack.push_back(BreakContinue()); + Visit(S->getBody()); + // If the switch is inside a loop, add the continue counts. + BreakContinue BC = BreakContinueStack.pop_back_val(); + if (!BreakContinueStack.empty()) + BreakContinueStack.back().ContinueCount += BC.ContinueCount; + // Counter tracks the exit block of the switch. + setCount(PGO.getRegionCount(S)); + RecordNextStmtCount = true; + } - Cnt.beginRegion(); - CountMap[S->getThen()] = PGO.getCurrentRegionCount(); - Visit(S->getThen()); - Cnt.adjustForControlFlow(); - - if (S->getElse()) { - Cnt.beginElseRegion(); - CountMap[S->getElse()] = PGO.getCurrentRegionCount(); - Visit(S->getElse()); - Cnt.adjustForControlFlow(); - } - Cnt.applyAdjustmentsToRegion(0); - RecordNextStmtCount = true; - } + void VisitSwitchCase(const SwitchCase *S) { + RecordNextStmtCount = false; + // Counter for this particular case. This counts only jumps from the + // switch header and does not include fallthrough from the case before + // this one. + uint64_t CaseCount = PGO.getRegionCount(S); + setCount(CurrentCount + CaseCount); + // We need the count without fallthrough in the mapping, so it's more useful + // for branch probabilities. 
+ CountMap[S] = CaseCount; + RecordNextStmtCount = true; + Visit(S->getSubStmt()); + } - void VisitCXXTryStmt(const CXXTryStmt *S) { - RecordStmtCount(S); - Visit(S->getTryBlock()); - for (unsigned I = 0, E = S->getNumHandlers(); I < E; ++I) - Visit(S->getHandler(I)); - // Counter tracks the continuation block of the try statement. - RegionCounter Cnt(PGO, S); - Cnt.beginRegion(); - RecordNextStmtCount = true; - } + void VisitIfStmt(const IfStmt *S) { + RecordStmtCount(S); + uint64_t ParentCount = CurrentCount; + Visit(S->getCond()); + + // Counter tracks the "then" part of an if statement. The count for + // the "else" part, if it exists, will be calculated from this counter. + uint64_t ThenCount = setCount(PGO.getRegionCount(S)); + CountMap[S->getThen()] = ThenCount; + Visit(S->getThen()); + uint64_t OutCount = CurrentCount; + + uint64_t ElseCount = ParentCount - ThenCount; + if (S->getElse()) { + setCount(ElseCount); + CountMap[S->getElse()] = ElseCount; + Visit(S->getElse()); + OutCount += CurrentCount; + } else + OutCount += ElseCount; + setCount(OutCount); + RecordNextStmtCount = true; + } - void VisitCXXCatchStmt(const CXXCatchStmt *S) { - RecordNextStmtCount = false; - // Counter tracks the catch statement's handler block. - RegionCounter Cnt(PGO, S); - Cnt.beginRegion(); - CountMap[S] = PGO.getCurrentRegionCount(); - Visit(S->getHandlerBlock()); - } + void VisitCXXTryStmt(const CXXTryStmt *S) { + RecordStmtCount(S); + Visit(S->getTryBlock()); + for (unsigned I = 0, E = S->getNumHandlers(); I < E; ++I) + Visit(S->getHandler(I)); + // Counter tracks the continuation block of the try statement. + setCount(PGO.getRegionCount(S)); + RecordNextStmtCount = true; + } - void VisitAbstractConditionalOperator( - const AbstractConditionalOperator *E) { - RecordStmtCount(E); - // Counter tracks the "true" part of a conditional operator. The - // count in the "false" part will be calculated from this counter. - RegionCounter Cnt(PGO, E); - Visit(E->getCond()); - - Cnt.beginRegion(); - CountMap[E->getTrueExpr()] = PGO.getCurrentRegionCount(); - Visit(E->getTrueExpr()); - Cnt.adjustForControlFlow(); - - Cnt.beginElseRegion(); - CountMap[E->getFalseExpr()] = PGO.getCurrentRegionCount(); - Visit(E->getFalseExpr()); - Cnt.adjustForControlFlow(); - - Cnt.applyAdjustmentsToRegion(0); - RecordNextStmtCount = true; - } + void VisitCXXCatchStmt(const CXXCatchStmt *S) { + RecordNextStmtCount = false; + // Counter tracks the catch statement's handler block. + uint64_t CatchCount = setCount(PGO.getRegionCount(S)); + CountMap[S] = CatchCount; + Visit(S->getHandlerBlock()); + } - void VisitBinLAnd(const BinaryOperator *E) { - RecordStmtCount(E); - // Counter tracks the right hand side of a logical and operator. - RegionCounter Cnt(PGO, E); - Visit(E->getLHS()); - Cnt.beginRegion(); - CountMap[E->getRHS()] = PGO.getCurrentRegionCount(); - Visit(E->getRHS()); - Cnt.adjustForControlFlow(); - Cnt.applyAdjustmentsToRegion(0); - RecordNextStmtCount = true; - } + void VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) { + RecordStmtCount(E); + uint64_t ParentCount = CurrentCount; + Visit(E->getCond()); + + // Counter tracks the "true" part of a conditional operator. The + // count in the "false" part will be calculated from this counter. 
+ uint64_t TrueCount = setCount(PGO.getRegionCount(E)); + CountMap[E->getTrueExpr()] = TrueCount; + Visit(E->getTrueExpr()); + uint64_t OutCount = CurrentCount; + + uint64_t FalseCount = setCount(ParentCount - TrueCount); + CountMap[E->getFalseExpr()] = FalseCount; + Visit(E->getFalseExpr()); + OutCount += CurrentCount; + + setCount(OutCount); + RecordNextStmtCount = true; + } - void VisitBinLOr(const BinaryOperator *E) { - RecordStmtCount(E); - // Counter tracks the right hand side of a logical or operator. - RegionCounter Cnt(PGO, E); - Visit(E->getLHS()); - Cnt.beginRegion(); - CountMap[E->getRHS()] = PGO.getCurrentRegionCount(); - Visit(E->getRHS()); - Cnt.adjustForControlFlow(); - Cnt.applyAdjustmentsToRegion(0); - RecordNextStmtCount = true; - } - }; + void VisitBinLAnd(const BinaryOperator *E) { + RecordStmtCount(E); + uint64_t ParentCount = CurrentCount; + Visit(E->getLHS()); + // Counter tracks the right hand side of a logical and operator. + uint64_t RHSCount = setCount(PGO.getRegionCount(E)); + CountMap[E->getRHS()] = RHSCount; + Visit(E->getRHS()); + setCount(ParentCount + RHSCount - CurrentCount); + RecordNextStmtCount = true; + } + + void VisitBinLOr(const BinaryOperator *E) { + RecordStmtCount(E); + uint64_t ParentCount = CurrentCount; + Visit(E->getLHS()); + // Counter tracks the right hand side of a logical or operator. + uint64_t RHSCount = setCount(PGO.getRegionCount(E)); + CountMap[E->getRHS()] = RHSCount; + Visit(E->getRHS()); + setCount(ParentCount + RHSCount - CurrentCount); + RecordNextStmtCount = true; + } +}; } void PGOHash::combine(HashType Type) { @@ -728,12 +719,10 @@ void CodeGenPGO::emitCounterRegionMapping(const Decl *D) { } void -CodeGenPGO::emitEmptyCounterMapping(const Decl *D, StringRef FuncName, +CodeGenPGO::emitEmptyCounterMapping(const Decl *D, StringRef Name, llvm::GlobalValue::LinkageTypes Linkage) { if (SkipCoverageMapping) return; - setFuncName(FuncName, Linkage); - // Don't map the functions inside the system headers auto Loc = D->getBody()->getLocStart(); if (CGM.getContext().getSourceManager().isInSystemHeader(Loc)) @@ -750,6 +739,7 @@ CodeGenPGO::emitEmptyCounterMapping(const Decl *D, StringRef FuncName, if (CoverageMapping.empty()) return; + setFuncName(Name, Linkage); CGM.getCoverageMapping()->addFunctionMappingRecord( FuncNameVar, FuncName, FunctionHash, CoverageMapping); } @@ -785,17 +775,19 @@ CodeGenPGO::applyFunctionAttributes(llvm::IndexedInstrProfReader *PGOReader, Fn->addFnAttr(llvm::Attribute::Cold); } -void CodeGenPGO::emitCounterIncrement(CGBuilderTy &Builder, unsigned Counter) { +void CodeGenPGO::emitCounterIncrement(CGBuilderTy &Builder, const Stmt *S) { if (!CGM.getCodeGenOpts().ProfileInstrGenerate || !RegionCounterMap) return; if (!Builder.GetInsertPoint()) return; + + unsigned Counter = (*RegionCounterMap)[S]; auto *I8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext()); - Builder.CreateCall4(CGM.getIntrinsic(llvm::Intrinsic::instrprof_increment), - llvm::ConstantExpr::getBitCast(FuncNameVar, I8PtrTy), + Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::instrprof_increment), + {llvm::ConstantExpr::getBitCast(FuncNameVar, I8PtrTy), Builder.getInt64(FunctionHash), Builder.getInt32(NumRegionCounters), - Builder.getInt32(Counter)); + Builder.getInt32(Counter)}); } void CodeGenPGO::loadRegionCounts(llvm::IndexedInstrProfReader *PGOReader, @@ -839,8 +831,8 @@ static uint32_t scaleBranchWeight(uint64_t Weight, uint64_t Scale) { return Scaled; } -llvm::MDNode *CodeGenPGO::createBranchWeights(uint64_t TrueCount, - uint64_t 
FalseCount) { +llvm::MDNode *CodeGenFunction::createProfileWeights(uint64_t TrueCount, + uint64_t FalseCount) { // Check for empty weights. if (!TrueCount && !FalseCount) return nullptr; @@ -853,7 +845,8 @@ llvm::MDNode *CodeGenPGO::createBranchWeights(uint64_t TrueCount, scaleBranchWeight(FalseCount, Scale)); } -llvm::MDNode *CodeGenPGO::createBranchWeights(ArrayRef<uint64_t> Weights) { +llvm::MDNode * +CodeGenFunction::createProfileWeights(ArrayRef<uint64_t> Weights) { // We need at least two elements to create meaningful weights. if (Weights.size() < 2) return nullptr; @@ -875,17 +868,14 @@ llvm::MDNode *CodeGenPGO::createBranchWeights(ArrayRef<uint64_t> Weights) { return MDHelper.createBranchWeights(ScaledWeights); } -llvm::MDNode *CodeGenPGO::createLoopWeights(const Stmt *Cond, - RegionCounter &Cnt) { - if (!haveRegionCounts()) +llvm::MDNode *CodeGenFunction::createProfileWeightsForLoop(const Stmt *Cond, + uint64_t LoopCount) { + if (!PGO.haveRegionCounts()) return nullptr; - uint64_t LoopCount = Cnt.getCount(); - uint64_t CondCount = 0; - bool Found = getStmtCount(Cond, CondCount); - assert(Found && "missing expected loop condition count"); - (void)Found; - if (CondCount == 0) + Optional<uint64_t> CondCount = PGO.getStmtCount(Cond); + assert(CondCount.hasValue() && "missing expected loop condition count"); + if (*CondCount == 0) return nullptr; - return createBranchWeights(LoopCount, - std::max(CondCount, LoopCount) - LoopCount); + return createProfileWeights(LoopCount, + std::max(*CondCount, LoopCount) - LoopCount); } diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenPGO.h b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenPGO.h index 431c850..de6f369 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenPGO.h +++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenPGO.h @@ -24,10 +24,8 @@ namespace clang { namespace CodeGen { -class RegionCounter; -/// Per-function PGO state. This class should generally not be used directly, -/// but instead through the CodeGenFunction and RegionCounter types. +/// Per-function PGO state. class CodeGenPGO { private: CodeGenModule &CGM; @@ -62,37 +60,24 @@ public: /// exits. void setCurrentRegionCount(uint64_t Count) { CurrentRegionCount = Count; } - /// Indicate that the current region is never reached, and thus should have a - /// counter value of zero. This is important so that subsequent regions can - /// correctly track their parent counts. - void setCurrentRegionUnreachable() { setCurrentRegionCount(0); } - /// Check if an execution count is known for a given statement. If so, return /// true and put the value in Count; else return false. - bool getStmtCount(const Stmt *S, uint64_t &Count) { + Optional<uint64_t> getStmtCount(const Stmt *S) { if (!StmtCountMap) - return false; - llvm::DenseMap<const Stmt*, uint64_t>::const_iterator - I = StmtCountMap->find(S); + return None; + auto I = StmtCountMap->find(S); if (I == StmtCountMap->end()) - return false; - Count = I->second; - return true; + return None; + return I->second; } /// If the execution count for the current statement is known, record that /// as the current count. 
void setCurrentStmt(const Stmt *S) { - uint64_t Count; - if (getStmtCount(S, Count)) - setCurrentRegionCount(Count); + if (auto Count = getStmtCount(S)) + setCurrentRegionCount(*Count); } - /// Calculate branch weights appropriate for PGO data - llvm::MDNode *createBranchWeights(uint64_t TrueCount, uint64_t FalseCount); - llvm::MDNode *createBranchWeights(ArrayRef<uint64_t> Weights); - llvm::MDNode *createLoopWeights(const Stmt *Cond, RegionCounter &Cnt); - /// Check if we need to emit coverage mapping for a given declaration void checkGlobalDecl(GlobalDecl GD); /// Assign counters to regions and configure them for PGO of a given @@ -117,110 +102,16 @@ private: void emitCounterVariables(); void emitCounterRegionMapping(const Decl *D); - /// Emit code to increment the counter at the given index - void emitCounterIncrement(CGBuilderTy &Builder, unsigned Counter); +public: + void emitCounterIncrement(CGBuilderTy &Builder, const Stmt *S); - /// Return the region counter for the given statement. This should only be - /// called on statements that have a dedicated counter. - unsigned getRegionCounter(const Stmt *S) { + /// Return the region count for the counter at the given index. + uint64_t getRegionCount(const Stmt *S) { if (!RegionCounterMap) return 0; - return (*RegionCounterMap)[S]; - } - - /// Return the region count for the counter at the given index. - uint64_t getRegionCount(unsigned Counter) { if (!haveRegionCounts()) return 0; - return RegionCounts[Counter]; - } - - friend class RegionCounter; -}; - -/// A counter for a particular region. This is the primary interface through -/// which clients manage PGO counters and their values. -class RegionCounter { - CodeGenPGO *PGO; - unsigned Counter; - uint64_t Count; - uint64_t ParentCount; - uint64_t RegionCount; - int64_t Adjust; - - RegionCounter(CodeGenPGO &PGO, unsigned CounterIndex) - : PGO(&PGO), Counter(CounterIndex), Count(PGO.getRegionCount(Counter)), - ParentCount(PGO.getCurrentRegionCount()), Adjust(0) {} - -public: - RegionCounter(CodeGenPGO &PGO, const Stmt *S) - : PGO(&PGO), Counter(PGO.getRegionCounter(S)), - Count(PGO.getRegionCount(Counter)), - ParentCount(PGO.getCurrentRegionCount()), Adjust(0) {} - - /// Get the value of the counter. In most cases this is the number of times - /// the region of the counter was entered, but for switch labels it's the - /// number of direct jumps to that label. - uint64_t getCount() const { return Count; } - - /// Get the value of the counter with adjustments applied. Adjustments occur - /// when control enters or leaves the region abnormally; i.e., if there is a - /// jump to a label within the region, or if the function can return from - /// within the region. The adjusted count, then, is the value of the counter - /// at the end of the region. - uint64_t getAdjustedCount() const { - return Count + Adjust; - } - - /// Get the value of the counter in this region's parent, i.e., the region - /// that was active when this region began. This is useful for deriving - /// counts in implicitly counted regions, like the false case of a condition - /// or the normal exits of a loop. - uint64_t getParentCount() const { return ParentCount; } - - /// Activate the counter by emitting an increment and starting to track - /// adjustments. If AddIncomingFallThrough is true, the current region count - /// will be added to the counter for the purposes of tracking the region. 
- void beginRegion(CGBuilderTy &Builder, bool AddIncomingFallThrough=false) { - beginRegion(AddIncomingFallThrough); - PGO->emitCounterIncrement(Builder, Counter); - } - void beginRegion(bool AddIncomingFallThrough=false) { - RegionCount = Count; - if (AddIncomingFallThrough) - RegionCount += PGO->getCurrentRegionCount(); - PGO->setCurrentRegionCount(RegionCount); - } - - /// For counters on boolean branches, begins tracking adjustments for the - /// uncounted path. - void beginElseRegion() { - RegionCount = ParentCount - Count; - PGO->setCurrentRegionCount(RegionCount); - } - - /// Reset the current region count. - void setCurrentRegionCount(uint64_t CurrentCount) { - RegionCount = CurrentCount; - PGO->setCurrentRegionCount(RegionCount); - } - - /// Adjust for non-local control flow after emitting a subexpression or - /// substatement. This must be called to account for constructs such as gotos, - /// labels, and returns, so that we can ensure that our region's count is - /// correct in the code that follows. - void adjustForControlFlow() { - Adjust += PGO->getCurrentRegionCount() - RegionCount; - // Reset the region count in case this is called again later. - RegionCount = PGO->getCurrentRegionCount(); - } - - /// Commit all adjustments to the current region. If the region is a loop, - /// the LoopAdjust value should be the count of all the breaks and continues - /// from the loop, to compensate for those counts being deducted from the - /// adjustments for the body of the loop. - void applyAdjustmentsToRegion(uint64_t LoopAdjust) { - PGO->setCurrentRegionCount(ParentCount + Adjust + LoopAdjust); + return RegionCounts[(*RegionCounterMap)[S]]; } }; diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.cpp index 67a9fbe..e0f926c 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.cpp +++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.cpp @@ -715,9 +715,16 @@ bool CodeGenTypes::isZeroInitializable(QualType T) { // No need to check for member pointers when not compiling C++. if (!Context.getLangOpts().CPlusPlus) return true; - - T = Context.getBaseElementType(T); - + + if (const auto *AT = Context.getAsArrayType(T)) { + if (isa<IncompleteArrayType>(AT)) + return true; + if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) + if (Context.getConstantArrayElementCount(CAT) == 0) + return true; + T = Context.getBaseElementType(T); + } + // Records are non-zero-initializable if they contain any // non-zero-initializable subobjects. 
if (const RecordType *RT = T->getAs<RecordType>()) { @@ -733,6 +740,6 @@ bool CodeGenTypes::isZeroInitializable(QualType T) { return true; } -bool CodeGenTypes::isZeroInitializable(const CXXRecordDecl *RD) { +bool CodeGenTypes::isZeroInitializable(const RecordDecl *RD) { return getCGRecordLayout(RD).isZeroInitializable(); } diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.h b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.h index 64c5799..1580e21 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.h +++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.h @@ -82,6 +82,9 @@ inline StructorType getFromCtorType(CXXCtorType T) { return StructorType::Base; case Ctor_Comdat: llvm_unreachable("not expecting a COMDAT"); + case Ctor_CopyingClosure: + case Ctor_DefaultClosure: + llvm_unreachable("not expecting a closure"); } llvm_unreachable("not a CXXCtorType"); } @@ -112,8 +115,8 @@ inline StructorType getFromDtorType(CXXDtorType T) { llvm_unreachable("not a CXXDtorType"); } -/// CodeGenTypes - This class organizes the cross-module state that is used -/// while lowering AST types to LLVM types. +/// This class organizes the cross-module state that is used while lowering +/// AST types to LLVM types. class CodeGenTypes { CodeGenModule &CGM; // Some of this stuff should probably be left on the CGM. @@ -133,34 +136,32 @@ class CodeGenTypes { /// types are never refined. llvm::DenseMap<const ObjCInterfaceType*, llvm::Type *> InterfaceTypes; - /// CGRecordLayouts - This maps llvm struct type with corresponding - /// record layout info. + /// Maps clang struct type with corresponding record layout info. llvm::DenseMap<const Type*, CGRecordLayout *> CGRecordLayouts; - /// RecordDeclTypes - This contains the LLVM IR type for any converted - /// RecordDecl. + /// Contains the LLVM IR type for any converted RecordDecl. llvm::DenseMap<const Type*, llvm::StructType *> RecordDeclTypes; - /// FunctionInfos - Hold memoized CGFunctionInfo results. + /// Hold memoized CGFunctionInfo results. llvm::FoldingSet<CGFunctionInfo> FunctionInfos; - /// RecordsBeingLaidOut - This set keeps track of records that we're currently - /// converting to an IR type. For example, when converting: + /// This set keeps track of records that we're currently converting + /// to an IR type. For example, when converting: /// struct A { struct B { int x; } } when processing 'x', the 'A' and 'B' /// types will be in this set. llvm::SmallPtrSet<const Type*, 4> RecordsBeingLaidOut; llvm::SmallPtrSet<const CGFunctionInfo*, 4> FunctionsBeingProcessed; - /// SkippedLayout - True if we didn't layout a function due to a being inside + /// True if we didn't layout a function due to a being inside /// a recursive struct conversion, set this to true. bool SkippedLayout; SmallVector<const RecordDecl *, 8> DeferredRecords; private: - /// TypeCache - This map keeps cache of llvm::Types - /// and maps clang::Type to corresponding llvm::Type. + /// This map keeps cache of llvm::Types and maps clang::Type to + /// corresponding llvm::Type. 
llvm::DenseMap<const Type *, llvm::Type *> TypeCache; public: @@ -261,6 +262,8 @@ public: const FunctionProtoType *type, RequiredArgs required); const CGFunctionInfo &arrangeMSMemberPointerThunk(const CXXMethodDecl *MD); + const CGFunctionInfo &arrangeMSCtorClosure(const CXXConstructorDecl *CD, + CXXCtorType CT); const CGFunctionInfo &arrangeFreeFunctionType(CanQual<FunctionProtoType> Ty); const CGFunctionInfo &arrangeFreeFunctionType(CanQual<FunctionNoProtoType> Ty); @@ -305,7 +308,7 @@ public: // These are internal details of CGT that shouldn't be used externally. /// IsZeroInitializable - Return whether a record type can be /// zero-initialized (in the C++ sense) with an LLVM zeroinitializer. - bool isZeroInitializable(const CXXRecordDecl *RD); + bool isZeroInitializable(const RecordDecl *RD); bool isRecordLayoutComplete(const Type *Ty) const; bool noRecordsBeingLaidOut() const { diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CoverageMappingGen.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CoverageMappingGen.cpp index 6f159d4..024a45d 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CoverageMappingGen.cpp +++ b/contrib/llvm/tools/clang/lib/CodeGen/CoverageMappingGen.cpp @@ -15,6 +15,7 @@ #include "CodeGenFunction.h" #include "clang/AST/StmtVisitor.h" #include "clang/Lex/Lexer.h" +#include "llvm/ADT/Optional.h" #include "llvm/ProfileData/CoverageMapping.h" #include "llvm/ProfileData/CoverageMappingReader.h" #include "llvm/ProfileData/CoverageMappingWriter.h" @@ -33,87 +34,50 @@ namespace { /// \brief A region of source code that can be mapped to a counter. class SourceMappingRegion { -public: - enum RegionFlags { - /// \brief This region won't be emitted if it wasn't extended. - /// This is useful so that we won't emit source ranges for single tokens - /// that we don't really care that much about, like: - /// the '(' token in #define MACRO ( - IgnoreIfNotExtended = 0x0001, - }; - -private: - FileID File, MacroArgumentFile; - Counter Count; - /// \brief A statement that initiated the count of Zero. - /// - /// This initiator statement is useful to prevent merging of unreachable - /// regions with different statements that caused the counter to become - /// unreachable. - const Stmt *UnreachableInitiator; - - /// \brief A statement that separates certain mapping regions into groups. - /// - /// The group statement is sometimes useful when we are emitting the source - /// regions not in their correct lexical order, e.g. the regions for the - /// incrementation expression in the 'for' construct. By marking the regions - /// in the incrementation expression with the group statement, we avoid the - /// merging of the regions from the incrementation expression and the loop's - /// body. - const Stmt *Group; - /// \brief The region's starting location. - SourceLocation LocStart; + Optional<SourceLocation> LocStart; /// \brief The region's ending location. 
- SourceLocation LocEnd, AlternativeLocEnd; - unsigned Flags; + Optional<SourceLocation> LocEnd; public: - SourceMappingRegion(FileID File, FileID MacroArgumentFile, Counter Count, - const Stmt *UnreachableInitiator, const Stmt *Group, - SourceLocation LocStart, SourceLocation LocEnd, - unsigned Flags = 0) - : File(File), MacroArgumentFile(MacroArgumentFile), Count(Count), - UnreachableInitiator(UnreachableInitiator), Group(Group), - LocStart(LocStart), LocEnd(LocEnd), AlternativeLocEnd(LocStart), - Flags(Flags) {} + SourceMappingRegion(Counter Count, Optional<SourceLocation> LocStart, + Optional<SourceLocation> LocEnd) + : Count(Count), LocStart(LocStart), LocEnd(LocEnd) {} + + SourceMappingRegion(SourceMappingRegion &&Region) + : Count(std::move(Region.Count)), LocStart(std::move(Region.LocStart)), + LocEnd(std::move(Region.LocEnd)) {} - const FileID &getFile() const { return File; } + SourceMappingRegion &operator=(SourceMappingRegion &&RHS) { + Count = std::move(RHS.Count); + LocStart = std::move(RHS.LocStart); + LocEnd = std::move(RHS.LocEnd); + return *this; + } const Counter &getCounter() const { return Count; } - const SourceLocation &getStartLoc() const { return LocStart; } + void setCounter(Counter C) { Count = C; } - const SourceLocation &getEndLoc(const SourceManager &SM) const { - if (SM.getFileID(LocEnd) != File) - return AlternativeLocEnd; - return LocEnd; - } + bool hasStartLoc() const { return LocStart.hasValue(); } - bool hasFlag(RegionFlags Flag) const { return (Flags & Flag) != 0; } + void setStartLoc(SourceLocation Loc) { LocStart = Loc; } - void setFlag(RegionFlags Flag) { Flags |= Flag; } + const SourceLocation &getStartLoc() const { + assert(LocStart && "Region has no start location"); + return *LocStart; + } - void clearFlag(RegionFlags Flag) { Flags &= ~Flag; } + bool hasEndLoc() const { return LocEnd.hasValue(); } - /// \brief Return true if two regions can be merged together. - bool isMergeable(SourceMappingRegion &R) { - // FIXME: We allow merging regions with a gap in between them. Should we? - return File == R.File && MacroArgumentFile == R.MacroArgumentFile && - Count == R.Count && UnreachableInitiator == R.UnreachableInitiator && - Group == R.Group; - } + void setEndLoc(SourceLocation Loc) { LocEnd = Loc; } - /// \brief A comparison that sorts such that mergeable regions are adjacent. - friend bool operator<(const SourceMappingRegion &LHS, - const SourceMappingRegion &RHS) { - return std::tie(LHS.File, LHS.MacroArgumentFile, LHS.Count, - LHS.UnreachableInitiator, LHS.Group) < - std::tie(RHS.File, RHS.MacroArgumentFile, RHS.Count, - RHS.UnreachableInitiator, RHS.Group); + const SourceLocation &getEndLoc() const { + assert(LocEnd && "Region has no end location"); + return *LocEnd; } }; @@ -126,26 +90,11 @@ public: const LangOptions &LangOpts; private: - struct FileInfo { - /// \brief The file id that will be used by the coverage mapping system. - unsigned CovMappingFileID; - const FileEntry *Entry; - - FileInfo(unsigned CovMappingFileID, const FileEntry *Entry) - : CovMappingFileID(CovMappingFileID), Entry(Entry) {} - }; - - /// \brief This mapping maps clang's FileIDs to file ids used - /// by the coverage mapping system and clang's file entries. - llvm::SmallDenseMap<FileID, FileInfo, 8> FileIDMapping; + /// \brief Map of clang's FileIDs to IDs used for coverage mapping. + llvm::SmallDenseMap<FileID, std::pair<unsigned, SourceLocation>, 8> + FileIDMapping; public: - /// \brief The statement that corresponds to the current source group. 
- const Stmt *CurrentSourceGroup; - - /// \brief The statement the initiated the current unreachable region. - const Stmt *CurrentUnreachableRegionInitiator; - /// \brief The coverage mapping regions for this function llvm::SmallVector<CounterMappingRegion, 32> MappingRegions; /// \brief The source mapping regions for this function. @@ -153,60 +102,104 @@ public: CoverageMappingBuilder(CoverageMappingModuleGen &CVM, SourceManager &SM, const LangOptions &LangOpts) - : CVM(CVM), SM(SM), LangOpts(LangOpts), - CurrentSourceGroup(nullptr), - CurrentUnreachableRegionInitiator(nullptr) {} + : CVM(CVM), SM(SM), LangOpts(LangOpts) {} /// \brief Return the precise end location for the given token. SourceLocation getPreciseTokenLocEnd(SourceLocation Loc) { - return Lexer::getLocForEndOfToken(SM.getSpellingLoc(Loc), 0, SM, LangOpts); + // We avoid getLocForEndOfToken here, because it doesn't do what we want for + // macro locations, which we just treat as expanded files. + unsigned TokLen = + Lexer::MeasureTokenLength(SM.getSpellingLoc(Loc), SM, LangOpts); + return Loc.getLocWithOffset(TokLen); } - /// \brief Create the mapping that maps from the function's file ids to - /// the indices for the translation unit's filenames. - void createFileIDMapping(SmallVectorImpl<unsigned> &Mapping) { - Mapping.resize(FileIDMapping.size(), 0); - for (const auto &I : FileIDMapping) - Mapping[I.second.CovMappingFileID] = CVM.getFileID(I.second.Entry); + /// \brief Return the start location of an included file or expanded macro. + SourceLocation getStartOfFileOrMacro(SourceLocation Loc) { + if (Loc.isMacroID()) + return Loc.getLocWithOffset(-SM.getFileOffset(Loc)); + return SM.getLocForStartOfFile(SM.getFileID(Loc)); } - /// \brief Get the coverage mapping file id that corresponds to the given - /// clang file id. If such file id doesn't exist, it gets added to the - /// mapping that maps from clang's file ids to coverage mapping file ids. - /// Return true if there was an error getting the coverage mapping file id. - /// An example of an when this function fails is when the region tries - /// to get a coverage file id for a location in a built-in macro. - bool getCoverageFileID(SourceLocation LocStart, FileID File, - FileID SpellingFile, unsigned &Result) { - auto Mapping = FileIDMapping.find(File); - if (Mapping != FileIDMapping.end()) { - Result = Mapping->second.CovMappingFileID; - return false; - } + /// \brief Return the end location of an included file or expanded macro. + SourceLocation getEndOfFileOrMacro(SourceLocation Loc) { + if (Loc.isMacroID()) + return Loc.getLocWithOffset(SM.getFileIDSize(SM.getFileID(Loc)) - + SM.getFileOffset(Loc)); + return SM.getLocForEndOfFile(SM.getFileID(Loc)); + } + + /// \brief Find out where the current file is included or macro is expanded. + SourceLocation getIncludeOrExpansionLoc(SourceLocation Loc) { + return Loc.isMacroID() ? SM.getImmediateExpansionRange(Loc).first + : SM.getIncludeLoc(SM.getFileID(Loc)); + } + + /// \brief Return true if \c Loc is a location in a built-in macro. + bool isInBuiltin(SourceLocation Loc) { + return strcmp(SM.getBufferName(SM.getSpellingLoc(Loc)), "<built-in>") == 0; + } - auto Entry = SM.getFileEntryForID(SpellingFile); - if (!Entry) - return true; + /// \brief Get the start of \c S ignoring macro arguments and builtin macros. 
+ SourceLocation getStart(const Stmt *S) { + SourceLocation Loc = S->getLocStart(); + while (SM.isMacroArgExpansion(Loc) || isInBuiltin(Loc)) + Loc = SM.getImmediateExpansionRange(Loc).first; + return Loc; + } - Result = FileIDMapping.size(); - FileIDMapping.insert(std::make_pair(File, FileInfo(Result, Entry))); - createFileExpansionRegion(LocStart, File); - return false; + /// \brief Get the end of \c S ignoring macro arguments and builtin macros. + SourceLocation getEnd(const Stmt *S) { + SourceLocation Loc = S->getLocEnd(); + while (SM.isMacroArgExpansion(Loc) || isInBuiltin(Loc)) + Loc = SM.getImmediateExpansionRange(Loc).first; + return getPreciseTokenLocEnd(Loc); } - /// \brief Get the coverage mapping file id that corresponds to the given - /// clang file id. - /// Return true if there was an error getting the coverage mapping file id. - bool getExistingCoverageFileID(FileID File, unsigned &Result) { - // Make sure that the file is valid. - if (File.isInvalid()) - return true; - auto Mapping = FileIDMapping.find(File); - if (Mapping != FileIDMapping.end()) { - Result = Mapping->second.CovMappingFileID; - return false; + /// \brief Find the set of files we have regions for and assign IDs + /// + /// Fills \c Mapping with the virtual file mapping needed to write out + /// coverage and collects the necessary file information to emit source and + /// expansion regions. + void gatherFileIDs(SmallVectorImpl<unsigned> &Mapping) { + FileIDMapping.clear(); + + SmallVector<FileID, 8> Visited; + SmallVector<std::pair<SourceLocation, unsigned>, 8> FileLocs; + for (const auto &Region : SourceRegions) { + SourceLocation Loc = Region.getStartLoc(); + FileID File = SM.getFileID(Loc); + if (std::find(Visited.begin(), Visited.end(), File) != Visited.end()) + continue; + Visited.push_back(File); + + unsigned Depth = 0; + for (SourceLocation Parent = getIncludeOrExpansionLoc(Loc); + !Parent.isInvalid(); Parent = getIncludeOrExpansionLoc(Parent)) + ++Depth; + FileLocs.push_back(std::make_pair(Loc, Depth)); } - return true; + std::stable_sort(FileLocs.begin(), FileLocs.end(), llvm::less_second()); + + for (const auto &FL : FileLocs) { + SourceLocation Loc = FL.first; + FileID SpellingFile = SM.getDecomposedSpellingLoc(Loc).first; + auto Entry = SM.getFileEntryForID(SpellingFile); + if (!Entry) + continue; + + FileIDMapping[SM.getFileID(Loc)] = std::make_pair(Mapping.size(), Loc); + Mapping.push_back(CVM.getFileID(Entry)); + } + } + + /// \brief Get the coverage mapping file ID for \c Loc. + /// + /// If such file id doesn't exist, return None. + Optional<unsigned> getCoverageFileID(SourceLocation Loc) { + auto Mapping = FileIDMapping.find(SM.getFileID(Loc)); + if (Mapping != FileIDMapping.end()) + return Mapping->second.first; + return None; } /// \brief Return true if the given clang's file id has a corresponding @@ -235,154 +228,83 @@ public: for (const auto &I : SkippedRanges) { auto LocStart = I.getBegin(); auto LocEnd = I.getEnd(); - auto FileStart = SM.getFileID(LocStart); - if (!hasExistingCoverageFileID(FileStart)) - continue; - auto ActualFileStart = SM.getDecomposedSpellingLoc(LocStart).first; - if (ActualFileStart != SM.getDecomposedSpellingLoc(LocEnd).first) - // Ignore regions that span across multiple files. 
- continue; + assert(SM.isWrittenInSameFile(LocStart, LocEnd) && + "region spans multiple files"); - unsigned CovFileID; - if (getCoverageFileID(LocStart, FileStart, ActualFileStart, CovFileID)) + auto CovFileID = getCoverageFileID(LocStart); + if (!CovFileID) continue; unsigned LineStart = SM.getSpellingLineNumber(LocStart); unsigned ColumnStart = SM.getSpellingColumnNumber(LocStart); unsigned LineEnd = SM.getSpellingLineNumber(LocEnd); unsigned ColumnEnd = SM.getSpellingColumnNumber(LocEnd); - CounterMappingRegion Region(Counter(), CovFileID, LineStart, ColumnStart, - LineEnd, ColumnEnd, false, - CounterMappingRegion::SkippedRegion); + auto Region = CounterMappingRegion::makeSkipped( + *CovFileID, LineStart, ColumnStart, LineEnd, ColumnEnd); // Make sure that we only collect the regions that are inside // the souce code of this function. - if (Region.LineStart >= FileLineRanges[CovFileID].first && - Region.LineEnd <= FileLineRanges[CovFileID].second) + if (Region.LineStart >= FileLineRanges[*CovFileID].first && + Region.LineEnd <= FileLineRanges[*CovFileID].second) MappingRegions.push_back(Region); } } - /// \brief Create a mapping region that correponds to an expansion of - /// a macro or an embedded include. - void createFileExpansionRegion(SourceLocation Loc, FileID ExpandedFile) { - SourceLocation LocStart; - if (Loc.isMacroID()) - LocStart = SM.getImmediateExpansionRange(Loc).first; - else { - LocStart = SM.getIncludeLoc(ExpandedFile); - if (LocStart.isInvalid()) - return; // This file has no expansion region. - } - - auto File = SM.getFileID(LocStart); - auto SpellingFile = SM.getDecomposedSpellingLoc(LocStart).first; - unsigned CovFileID, ExpandedFileID; - if (getExistingCoverageFileID(ExpandedFile, ExpandedFileID)) - return; - if (getCoverageFileID(LocStart, File, SpellingFile, CovFileID)) - return; - unsigned LineStart = SM.getSpellingLineNumber(LocStart); - unsigned ColumnStart = SM.getSpellingColumnNumber(LocStart); - unsigned LineEnd = LineStart; - // Compute the end column manually as Lexer::getLocForEndOfToken doesn't - // give the correct result in all cases. - unsigned ColumnEnd = - ColumnStart + - Lexer::MeasureTokenLength(SM.getSpellingLoc(LocStart), SM, LangOpts); - - MappingRegions.push_back(CounterMappingRegion( - Counter(), CovFileID, LineStart, ColumnStart, LineEnd, ColumnEnd, - false, CounterMappingRegion::ExpansionRegion)); - MappingRegions.back().ExpandedFileID = ExpandedFileID; - } - - /// \brief Enter a source region group that is identified by the given - /// statement. - /// It's not possible to enter a group when there is already - /// another group present. - void beginSourceRegionGroup(const Stmt *Group) { - assert(!CurrentSourceGroup); - CurrentSourceGroup = Group; - } - - /// \brief Exit the current source region group. - void endSourceRegionGroup() { CurrentSourceGroup = nullptr; } - - /// \brief Associate a counter with a given source code range. - void mapSourceCodeRange(SourceLocation LocStart, SourceLocation LocEnd, - Counter Count, const Stmt *UnreachableInitiator, - const Stmt *SourceGroup, unsigned Flags = 0, - FileID MacroArgumentFile = FileID()) { - if (SM.isMacroArgExpansion(LocStart)) { - // Map the code range with the macro argument's value. - mapSourceCodeRange(SM.getImmediateSpellingLoc(LocStart), - SM.getImmediateSpellingLoc(LocEnd), Count, - UnreachableInitiator, SourceGroup, Flags, - SM.getFileID(LocStart)); - // Map the code range where the macro argument is referenced. 
- SourceLocation RefLocStart(SM.getImmediateExpansionRange(LocStart).first); - SourceLocation RefLocEnd(RefLocStart); - if (SM.isMacroArgExpansion(RefLocStart)) - mapSourceCodeRange(RefLocStart, RefLocEnd, Count, UnreachableInitiator, - SourceGroup, 0, SM.getFileID(RefLocStart)); - else - mapSourceCodeRange(RefLocStart, RefLocEnd, Count, UnreachableInitiator, - SourceGroup); - return; - } - auto File = SM.getFileID(LocStart); - // Make sure that the file id is valid. - if (File.isInvalid()) - return; - SourceRegions.emplace_back(File, MacroArgumentFile, Count, - UnreachableInitiator, SourceGroup, LocStart, - LocEnd, Flags); - } - - void mapSourceCodeRange(SourceLocation LocStart, SourceLocation LocEnd, - Counter Count, unsigned Flags = 0) { - mapSourceCodeRange(LocStart, LocEnd, Count, - CurrentUnreachableRegionInitiator, CurrentSourceGroup, - Flags); - } - /// \brief Generate the coverage counter mapping regions from collected /// source regions. void emitSourceRegions() { - std::sort(SourceRegions.begin(), SourceRegions.end()); - - for (auto I = SourceRegions.begin(), E = SourceRegions.end(); I != E; ++I) { - // Keep the original start location of this region. - SourceLocation LocStart = I->getStartLoc(); - SourceLocation LocEnd = I->getEndLoc(SM); - - bool Ignore = I->hasFlag(SourceMappingRegion::IgnoreIfNotExtended); - // We need to handle mergeable regions together. - for (auto Next = I + 1; Next != E && Next->isMergeable(*I); ++Next) { - ++I; - LocStart = std::min(LocStart, I->getStartLoc()); - LocEnd = std::max(LocEnd, I->getEndLoc(SM)); - // FIXME: Should we && together the Ignore flag of multiple regions? - Ignore = false; - } - if (Ignore) + for (const auto &Region : SourceRegions) { + assert(Region.hasEndLoc() && "incomplete region"); + + SourceLocation LocStart = Region.getStartLoc(); + assert(!SM.getFileID(LocStart).isInvalid() && "region in invalid file"); + + auto CovFileID = getCoverageFileID(LocStart); + // Ignore regions that don't have a file, such as builtin macros. + if (!CovFileID) continue; + SourceLocation LocEnd = Region.getEndLoc(); + assert(SM.isWrittenInSameFile(LocStart, LocEnd) && + "region spans multiple files"); + // Find the spilling locations for the mapping region. - LocEnd = getPreciseTokenLocEnd(LocEnd); unsigned LineStart = SM.getSpellingLineNumber(LocStart); unsigned ColumnStart = SM.getSpellingColumnNumber(LocStart); unsigned LineEnd = SM.getSpellingLineNumber(LocEnd); unsigned ColumnEnd = SM.getSpellingColumnNumber(LocEnd); - auto SpellingFile = SM.getDecomposedSpellingLoc(LocStart).first; - unsigned CovFileID; - if (getCoverageFileID(LocStart, I->getFile(), SpellingFile, CovFileID)) + assert(LineStart <= LineEnd && "region start and end out of order"); + MappingRegions.push_back(CounterMappingRegion::makeRegion( + Region.getCounter(), *CovFileID, LineStart, ColumnStart, LineEnd, + ColumnEnd)); + } + } + + /// \brief Generate expansion regions for each virtual file we've seen. 
+ void emitExpansionRegions() { + for (const auto &FM : FileIDMapping) { + SourceLocation ExpandedLoc = FM.second.second; + SourceLocation ParentLoc = getIncludeOrExpansionLoc(ExpandedLoc); + if (ParentLoc.isInvalid()) + continue; + + auto ParentFileID = getCoverageFileID(ParentLoc); + if (!ParentFileID) continue; + auto ExpandedFileID = getCoverageFileID(ExpandedLoc); + assert(ExpandedFileID && "expansion in uncovered file"); + + SourceLocation LocEnd = getPreciseTokenLocEnd(ParentLoc); + assert(SM.isWrittenInSameFile(ParentLoc, LocEnd) && + "region spans multiple files"); - assert(LineStart <= LineEnd); - MappingRegions.push_back(CounterMappingRegion( - I->getCounter(), CovFileID, LineStart, ColumnStart, LineEnd, - ColumnEnd, false, CounterMappingRegion::CodeRegion)); + unsigned LineStart = SM.getSpellingLineNumber(ParentLoc); + unsigned ColumnStart = SM.getSpellingColumnNumber(ParentLoc); + unsigned LineEnd = SM.getSpellingLineNumber(LocEnd); + unsigned ColumnEnd = SM.getSpellingColumnNumber(LocEnd); + + MappingRegions.push_back(CounterMappingRegion::makeExpansion( + *ParentFileID, *ExpandedFileID, LineStart, ColumnStart, LineEnd, + ColumnEnd)); } } }; @@ -398,14 +320,14 @@ struct EmptyCoverageMappingBuilder : public CoverageMappingBuilder { if (!D->hasBody()) return; auto Body = D->getBody(); - mapSourceCodeRange(Body->getLocStart(), Body->getLocEnd(), Counter()); + SourceRegions.emplace_back(Counter(), getStart(Body), getEnd(Body)); } /// \brief Write the mapping data to the output stream void write(llvm::raw_ostream &OS) { - emitSourceRegions(); SmallVector<unsigned, 16> FileIDMapping; - createFileIDMapping(FileIDMapping); + gatherFileIDs(FileIDMapping); + emitSourceRegions(); CoverageMappingWriter Writer(FileIDMapping, None, MappingRegions); Writer.write(OS); @@ -420,136 +342,215 @@ struct CounterCoverageMappingBuilder /// \brief The map of statements to count values. llvm::DenseMap<const Stmt *, unsigned> &CounterMap; - Counter CurrentRegionCount; + /// \brief A stack of currently live regions. + std::vector<SourceMappingRegion> RegionStack; CounterExpressionBuilder Builder; - /// \brief Return a counter that represents the - /// expression that subracts rhs from lhs. + /// \brief A location in the most recently visited file or macro. + /// + /// This is used to adjust the active source regions appropriately when + /// expressions cross file or macro boundaries. + SourceLocation MostRecentLocation; + + /// \brief Return a counter for the subtraction of \c RHS from \c LHS Counter subtractCounters(Counter LHS, Counter RHS) { return Builder.subtract(LHS, RHS); } - /// \brief Return a counter that represents the - /// the exression that adds lhs and rhs. + /// \brief Return a counter for the sum of \c LHS and \c RHS. Counter addCounters(Counter LHS, Counter RHS) { return Builder.add(LHS, RHS); } + Counter addCounters(Counter C1, Counter C2, Counter C3) { + return addCounters(addCounters(C1, C2), C3); + } + + Counter addCounters(Counter C1, Counter C2, Counter C3, Counter C4) { + return addCounters(addCounters(C1, C2, C3), C4); + } + /// \brief Return the region counter for the given statement. + /// /// This should only be called on statements that have a dedicated counter. - unsigned getRegionCounter(const Stmt *S) { return CounterMap[S]; } + Counter getRegionCounter(const Stmt *S) { + return Counter::getCounter(CounterMap[S]); + } + + /// \brief Push a region onto the stack. + /// + /// Returns the index on the stack where the region was pushed. 
This can be + /// used with popRegions to exit a "scope", ending the region that was pushed. + size_t pushRegion(Counter Count, Optional<SourceLocation> StartLoc = None, + Optional<SourceLocation> EndLoc = None) { + if (StartLoc) + MostRecentLocation = *StartLoc; + RegionStack.emplace_back(Count, StartLoc, EndLoc); - /// \brief Return the region count for the counter at the given index. - Counter getRegionCount(unsigned CounterId) { - return Counter::getCounter(CounterId); + return RegionStack.size() - 1; } - /// \brief Return the counter value of the current region. - Counter getCurrentRegionCount() { return CurrentRegionCount; } + /// \brief Pop regions from the stack into the function's list of regions. + /// + /// Adds all regions from \c ParentIndex to the top of the stack to the + /// function's \c SourceRegions. + void popRegions(size_t ParentIndex) { + assert(RegionStack.size() >= ParentIndex && "parent not in stack"); + while (RegionStack.size() > ParentIndex) { + SourceMappingRegion &Region = RegionStack.back(); + if (Region.hasStartLoc()) { + SourceLocation StartLoc = Region.getStartLoc(); + SourceLocation EndLoc = Region.hasEndLoc() + ? Region.getEndLoc() + : RegionStack[ParentIndex].getEndLoc(); + while (!SM.isWrittenInSameFile(StartLoc, EndLoc)) { + // The region ends in a nested file or macro expansion. Create a + // separate region for each expansion. + SourceLocation NestedLoc = getStartOfFileOrMacro(EndLoc); + assert(SM.isWrittenInSameFile(NestedLoc, EndLoc)); + + SourceRegions.emplace_back(Region.getCounter(), NestedLoc, EndLoc); + + EndLoc = getPreciseTokenLocEnd(getIncludeOrExpansionLoc(EndLoc)); + assert(!EndLoc.isInvalid() && + "File exit was not handled before popRegions"); + } + Region.setEndLoc(EndLoc); + + MostRecentLocation = EndLoc; + // If this region happens to span an entire expansion, we need to make + // sure we don't overlap the parent region with it. + if (StartLoc == getStartOfFileOrMacro(StartLoc) && + EndLoc == getEndOfFileOrMacro(EndLoc)) + MostRecentLocation = getIncludeOrExpansionLoc(EndLoc); + + assert(SM.isWrittenInSameFile(Region.getStartLoc(), EndLoc)); + SourceRegions.push_back(std::move(Region)); + } + RegionStack.pop_back(); + } + } - /// \brief Set the counter value for the current region. - /// This is used to keep track of changes to the most recent counter - /// from control flow and non-local exits. - void setCurrentRegionCount(Counter Count) { - CurrentRegionCount = Count; - CurrentUnreachableRegionInitiator = nullptr; + /// \brief Return the currently active region. + SourceMappingRegion &getRegion() { + assert(!RegionStack.empty() && "statement has no region"); + return RegionStack.back(); } - /// \brief Indicate that the current region is never reached, - /// and thus should have a counter value of zero. - /// This is important so that subsequent regions can correctly track - /// their parent counts. - void setCurrentRegionUnreachable(const Stmt *Initiator) { - CurrentRegionCount = Counter::getZero(); - CurrentUnreachableRegionInitiator = Initiator; + /// \brief Propagate counts through the children of \c S. + Counter propagateCounts(Counter TopCount, const Stmt *S) { + size_t Index = pushRegion(TopCount, getStart(S), getEnd(S)); + Visit(S); + Counter ExitCount = getRegion().getCounter(); + popRegions(Index); + return ExitCount; } - /// \brief A counter for a particular region. - /// This is the primary interface through - /// which the coverage mapping builder manages counters and their values. 
- class RegionMapper { - CounterCoverageMappingBuilder &Mapping; - Counter Count; - Counter ParentCount; - Counter RegionCount; - Counter Adjust; - - public: - RegionMapper(CounterCoverageMappingBuilder *Mapper, const Stmt *S) - : Mapping(*Mapper), - Count(Mapper->getRegionCount(Mapper->getRegionCounter(S))), - ParentCount(Mapper->getCurrentRegionCount()) {} - - /// Get the value of the counter. In most cases this is the number of times - /// the region of the counter was entered, but for switch labels it's the - /// number of direct jumps to that label. - Counter getCount() const { return Count; } - - /// Get the value of the counter with adjustments applied. Adjustments occur - /// when control enters or leaves the region abnormally; i.e., if there is a - /// jump to a label within the region, or if the function can return from - /// within the region. The adjusted count, then, is the value of the counter - /// at the end of the region. - Counter getAdjustedCount() const { - return Mapping.addCounters(Count, Adjust); - } + /// \brief Adjust the most recently visited location to \c EndLoc. + /// + /// This should be used after visiting any statements in non-source order. + void adjustForOutOfOrderTraversal(SourceLocation EndLoc) { + MostRecentLocation = EndLoc; + // Avoid adding duplicate regions if we have a completed region on the top + // of the stack and are adjusting to the end of a virtual file. + if (getRegion().hasEndLoc() && + MostRecentLocation == getEndOfFileOrMacro(MostRecentLocation)) + MostRecentLocation = getIncludeOrExpansionLoc(MostRecentLocation); + } + + /// \brief Check whether \c Loc is included or expanded from \c Parent. + bool isNestedIn(SourceLocation Loc, FileID Parent) { + do { + Loc = getIncludeOrExpansionLoc(Loc); + if (Loc.isInvalid()) + return false; + } while (!SM.isInFileID(Loc, Parent)); + return true; + } - /// Get the value of the counter in this region's parent, i.e., the region - /// that was active when this region began. This is useful for deriving - /// counts in implicitly counted regions, like the false case of a condition - /// or the normal exits of a loop. - Counter getParentCount() const { return ParentCount; } - - /// Activate the counter by emitting an increment and starting to track - /// adjustments. If AddIncomingFallThrough is true, the current region count - /// will be added to the counter for the purposes of tracking the region. - void beginRegion(bool AddIncomingFallThrough = false) { - RegionCount = Count; - if (AddIncomingFallThrough) - RegionCount = - Mapping.addCounters(RegionCount, Mapping.getCurrentRegionCount()); - Mapping.setCurrentRegionCount(RegionCount); - } + /// \brief Adjust regions and state when \c NewLoc exits a file. + /// + /// If moving from our most recently tracked location to \c NewLoc exits any + /// files, this adjusts our current region stack and creates the file regions + /// for the exited file. + void handleFileExit(SourceLocation NewLoc) { + if (SM.isWrittenInSameFile(MostRecentLocation, NewLoc)) + return; - /// For counters on boolean branches, begins tracking adjustments for the - /// uncounted path. - void beginElseRegion() { - RegionCount = Mapping.subtractCounters(ParentCount, Count); - Mapping.setCurrentRegionCount(RegionCount); + // If NewLoc is not in a file that contains MostRecentLocation, walk up to + // find the common ancestor. 
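+    // For intuition, a hypothetical layout: main.c includes a.h, and a.h in
+    // turn includes b.h. If MostRecentLocation is in b.h and NewLoc is back
+    // in a.h, ParentFile starts out as a.h, which already contains b.h, so
+    // the walk below stops immediately and we go on to emit b.h's file
+    // regions. Only when the two locations share no include or expansion
+    // ancestor do we give up and simply adopt NewLoc's file as-is.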
+ SourceLocation LCA = NewLoc; + FileID ParentFile = SM.getFileID(LCA); + while (!isNestedIn(MostRecentLocation, ParentFile)) { + LCA = getIncludeOrExpansionLoc(LCA); + if (LCA.isInvalid() || SM.isWrittenInSameFile(LCA, MostRecentLocation)) { + // Since there isn't a common ancestor, no file was exited. We just need + // to adjust our location to the new file. + MostRecentLocation = NewLoc; + return; + } + ParentFile = SM.getFileID(LCA); } - /// Reset the current region count. - void setCurrentRegionCount(Counter CurrentCount) { - RegionCount = CurrentCount; - Mapping.setCurrentRegionCount(RegionCount); - } + llvm::SmallSet<SourceLocation, 8> StartLocs; + Optional<Counter> ParentCounter; + for (auto I = RegionStack.rbegin(), E = RegionStack.rend(); I != E; ++I) { + if (!I->hasStartLoc()) + continue; + SourceLocation Loc = I->getStartLoc(); + if (!isNestedIn(Loc, ParentFile)) { + ParentCounter = I->getCounter(); + break; + } - /// Adjust for non-local control flow after emitting a subexpression or - /// substatement. This must be called to account for constructs such as - /// gotos, - /// labels, and returns, so that we can ensure that our region's count is - /// correct in the code that follows. - void adjustForControlFlow() { - Adjust = Mapping.addCounters( - Adjust, Mapping.subtractCounters(Mapping.getCurrentRegionCount(), - RegionCount)); - // Reset the region count in case this is called again later. - RegionCount = Mapping.getCurrentRegionCount(); + while (!SM.isInFileID(Loc, ParentFile)) { + // The most nested region for each start location is the one with the + // correct count. We avoid creating redundant regions by stopping once + // we've seen this region. + if (StartLocs.insert(Loc).second) + SourceRegions.emplace_back(I->getCounter(), Loc, + getEndOfFileOrMacro(Loc)); + Loc = getIncludeOrExpansionLoc(Loc); + } + I->setStartLoc(getPreciseTokenLocEnd(Loc)); } - /// Commit all adjustments to the current region. If the region is a loop, - /// the LoopAdjust value should be the count of all the breaks and continues - /// from the loop, to compensate for those counts being deducted from the - /// adjustments for the body of the loop. - void applyAdjustmentsToRegion() { - Mapping.setCurrentRegionCount(Mapping.addCounters(ParentCount, Adjust)); - } - void applyAdjustmentsToRegion(Counter LoopAdjust) { - Mapping.setCurrentRegionCount(Mapping.addCounters( - Mapping.addCounters(ParentCount, Adjust), LoopAdjust)); + if (ParentCounter) { + // If the file is contained completely by another region and doesn't + // immediately start its own region, the whole file gets a region + // corresponding to the parent. + SourceLocation Loc = MostRecentLocation; + while (isNestedIn(Loc, ParentFile)) { + SourceLocation FileStart = getStartOfFileOrMacro(Loc); + if (StartLocs.insert(FileStart).second) + SourceRegions.emplace_back(*ParentCounter, FileStart, + getEndOfFileOrMacro(Loc)); + Loc = getIncludeOrExpansionLoc(Loc); + } } - }; + + MostRecentLocation = NewLoc; + } + + /// \brief Ensure that \c S is included in the current region. + void extendRegion(const Stmt *S) { + SourceMappingRegion &Region = getRegion(); + SourceLocation StartLoc = getStart(S); + + handleFileExit(StartLoc); + if (!Region.hasStartLoc()) + Region.setStartLoc(StartLoc); + } + + /// \brief Mark \c S as a terminator, starting a zero region. 
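+  ///
+  /// For example, once a `return` has executed, nothing later in the
+  /// enclosing block can run, so the code that follows it is attributed to
+  /// a fresh region whose counter is zero rather than the parent's count.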
+ void terminateRegion(const Stmt *S) { + extendRegion(S); + SourceMappingRegion &Region = getRegion(); + if (!Region.hasEndLoc()) + Region.setEndLoc(getEnd(S)); + pushRegion(Counter::getZero()); + } /// \brief Keep counts of breaks and continues inside loops. struct BreakContinue { @@ -566,437 +567,318 @@ struct CounterCoverageMappingBuilder /// \brief Write the mapping data to the output stream void write(llvm::raw_ostream &OS) { - emitSourceRegions(); llvm::SmallVector<unsigned, 8> VirtualFileMapping; - createFileIDMapping(VirtualFileMapping); + gatherFileIDs(VirtualFileMapping); + emitSourceRegions(); + emitExpansionRegions(); gatherSkippedRegions(); - CoverageMappingWriter Writer( - VirtualFileMapping, Builder.getExpressions(), MappingRegions); + CoverageMappingWriter Writer(VirtualFileMapping, Builder.getExpressions(), + MappingRegions); Writer.write(OS); } - /// \brief Associate the source code range with the current region count. - void mapSourceCodeRange(SourceLocation LocStart, SourceLocation LocEnd, - unsigned Flags = 0) { - CoverageMappingBuilder::mapSourceCodeRange(LocStart, LocEnd, - CurrentRegionCount, Flags); - } - - void mapSourceCodeRange(SourceLocation LocStart) { - CoverageMappingBuilder::mapSourceCodeRange(LocStart, LocStart, - CurrentRegionCount); - } - - /// \brief Associate the source range of a token with the current region - /// count. - /// Ignore the source range for this token if it produces a distinct - /// mapping region with no other source ranges. - void mapToken(SourceLocation LocStart) { - CoverageMappingBuilder::mapSourceCodeRange( - LocStart, LocStart, CurrentRegionCount, - SourceMappingRegion::IgnoreIfNotExtended); - } - void VisitStmt(const Stmt *S) { - mapSourceCodeRange(S->getLocStart()); + if (!S->getLocStart().isInvalid()) + extendRegion(S); for (Stmt::const_child_range I = S->children(); I; ++I) { if (*I) this->Visit(*I); } + handleFileExit(getEnd(S)); } void VisitDecl(const Decl *D) { - if (!D->hasBody()) - return; - // Counter tracks entry to the function body. - auto Body = D->getBody(); - RegionMapper Cnt(this, Body); - Cnt.beginRegion(); - Visit(Body); - } - - void VisitDeclStmt(const DeclStmt *S) { - mapSourceCodeRange(S->getLocStart()); - for (Stmt::const_child_range I = static_cast<const Stmt *>(S)->children(); - I; ++I) { - if (*I) - this->Visit(*I); - } - } - - void VisitCompoundStmt(const CompoundStmt *S) { - mapSourceCodeRange(S->getLBracLoc()); - mapSourceCodeRange(S->getRBracLoc()); - for (Stmt::const_child_range I = S->children(); I; ++I) { - if (*I) - this->Visit(*I); - } + Stmt *Body = D->getBody(); + propagateCounts(getRegionCounter(Body), Body); } void VisitReturnStmt(const ReturnStmt *S) { - mapSourceCodeRange(S->getLocStart()); + extendRegion(S); if (S->getRetValue()) Visit(S->getRetValue()); - setCurrentRegionUnreachable(S); + terminateRegion(S); } - void VisitGotoStmt(const GotoStmt *S) { - mapSourceCodeRange(S->getLocStart()); - mapToken(S->getLabelLoc()); - setCurrentRegionUnreachable(S); + void VisitCXXThrowExpr(const CXXThrowExpr *E) { + extendRegion(E); + if (E->getSubExpr()) + Visit(E->getSubExpr()); + terminateRegion(E); } + void VisitGotoStmt(const GotoStmt *S) { terminateRegion(S); } + void VisitLabelStmt(const LabelStmt *S) { - // Counter tracks the block following the label. - RegionMapper Cnt(this, S); - Cnt.beginRegion(); - mapSourceCodeRange(S->getLocStart()); - // Can't map the ':' token as its location isn't known. 
+ SourceLocation Start = getStart(S); + // We can't extendRegion here or we risk overlapping with our new region. + handleFileExit(Start); + pushRegion(getRegionCounter(S), Start); Visit(S->getSubStmt()); } void VisitBreakStmt(const BreakStmt *S) { - mapSourceCodeRange(S->getLocStart()); assert(!BreakContinueStack.empty() && "break not in a loop or switch!"); BreakContinueStack.back().BreakCount = addCounters( - BreakContinueStack.back().BreakCount, getCurrentRegionCount()); - setCurrentRegionUnreachable(S); + BreakContinueStack.back().BreakCount, getRegion().getCounter()); + terminateRegion(S); } void VisitContinueStmt(const ContinueStmt *S) { - mapSourceCodeRange(S->getLocStart()); assert(!BreakContinueStack.empty() && "continue stmt not in a loop!"); BreakContinueStack.back().ContinueCount = addCounters( - BreakContinueStack.back().ContinueCount, getCurrentRegionCount()); - setCurrentRegionUnreachable(S); + BreakContinueStack.back().ContinueCount, getRegion().getCounter()); + terminateRegion(S); } void VisitWhileStmt(const WhileStmt *S) { - mapSourceCodeRange(S->getLocStart()); - // Counter tracks the body of the loop. - RegionMapper Cnt(this, S); + extendRegion(S); + + Counter ParentCount = getRegion().getCounter(); + Counter BodyCount = getRegionCounter(S); + + // Handle the body first so that we can get the backedge count. BreakContinueStack.push_back(BreakContinue()); - // Visit the body region first so the break/continue adjustments can be - // included when visiting the condition. - Cnt.beginRegion(); - Visit(S->getBody()); - Cnt.adjustForControlFlow(); - - // ...then go back and propagate counts through the condition. The count - // at the start of the condition is the sum of the incoming edges, - // the backedge from the end of the loop body, and the edges from - // continue statements. + extendRegion(S->getBody()); + Counter BackedgeCount = propagateCounts(BodyCount, S->getBody()); BreakContinue BC = BreakContinueStack.pop_back_val(); - Cnt.setCurrentRegionCount( - addCounters(Cnt.getParentCount(), - addCounters(Cnt.getAdjustedCount(), BC.ContinueCount))); - beginSourceRegionGroup(S->getCond()); - Visit(S->getCond()); - endSourceRegionGroup(); - Cnt.adjustForControlFlow(); - Cnt.applyAdjustmentsToRegion(addCounters(BC.BreakCount, BC.ContinueCount)); + + // Go back to handle the condition. + Counter CondCount = + addCounters(ParentCount, BackedgeCount, BC.ContinueCount); + propagateCounts(CondCount, S->getCond()); + adjustForOutOfOrderTraversal(getEnd(S)); + + Counter OutCount = + addCounters(BC.BreakCount, subtractCounters(CondCount, BodyCount)); + if (OutCount != ParentCount) + pushRegion(OutCount); } void VisitDoStmt(const DoStmt *S) { - mapSourceCodeRange(S->getLocStart()); - // Counter tracks the body of the loop. - RegionMapper Cnt(this, S); - BreakContinueStack.push_back(BreakContinue()); - Cnt.beginRegion(/*AddIncomingFallThrough=*/true); - Visit(S->getBody()); - Cnt.adjustForControlFlow(); + extendRegion(S); + + Counter ParentCount = getRegion().getCounter(); + Counter BodyCount = getRegionCounter(S); + BreakContinueStack.push_back(BreakContinue()); + extendRegion(S->getBody()); + Counter BackedgeCount = + propagateCounts(addCounters(ParentCount, BodyCount), S->getBody()); BreakContinue BC = BreakContinueStack.pop_back_val(); - // The count at the start of the condition is equal to the count at the - // end of the body. The adjusted count does not include either the - // fall-through count coming into the loop or the continue count, so add - // both of those separately. 
This is coincidentally the same equation as - // with while loops but for different reasons. - Cnt.setCurrentRegionCount( - addCounters(Cnt.getParentCount(), - addCounters(Cnt.getAdjustedCount(), BC.ContinueCount))); - Visit(S->getCond()); - Cnt.adjustForControlFlow(); - Cnt.applyAdjustmentsToRegion(addCounters(BC.BreakCount, BC.ContinueCount)); + + Counter CondCount = addCounters(BackedgeCount, BC.ContinueCount); + propagateCounts(CondCount, S->getCond()); + + Counter OutCount = + addCounters(BC.BreakCount, subtractCounters(CondCount, BodyCount)); + if (OutCount != ParentCount) + pushRegion(OutCount); } void VisitForStmt(const ForStmt *S) { - mapSourceCodeRange(S->getLocStart()); + extendRegion(S); if (S->getInit()) Visit(S->getInit()); - // Counter tracks the body of the loop. - RegionMapper Cnt(this, S); + Counter ParentCount = getRegion().getCounter(); + Counter BodyCount = getRegionCounter(S); + + // Handle the body first so that we can get the backedge count. BreakContinueStack.push_back(BreakContinue()); - // Visit the body region first. (This is basically the same as a while - // loop; see further comments in VisitWhileStmt.) - Cnt.beginRegion(); - Visit(S->getBody()); - Cnt.adjustForControlFlow(); + extendRegion(S->getBody()); + Counter BackedgeCount = propagateCounts(BodyCount, S->getBody()); + BreakContinue BC = BreakContinueStack.pop_back_val(); // The increment is essentially part of the body but it needs to include // the count for all the continue statements. - if (S->getInc()) { - Cnt.setCurrentRegionCount(addCounters( - getCurrentRegionCount(), BreakContinueStack.back().ContinueCount)); - beginSourceRegionGroup(S->getInc()); - Visit(S->getInc()); - endSourceRegionGroup(); - Cnt.adjustForControlFlow(); + if (const Stmt *Inc = S->getInc()) + propagateCounts(addCounters(BackedgeCount, BC.ContinueCount), Inc); + + // Go back to handle the condition. + Counter CondCount = + addCounters(ParentCount, BackedgeCount, BC.ContinueCount); + if (const Expr *Cond = S->getCond()) { + propagateCounts(CondCount, Cond); + adjustForOutOfOrderTraversal(getEnd(S)); } - BreakContinue BC = BreakContinueStack.pop_back_val(); - - // ...then go back and propagate counts through the condition. - if (S->getCond()) { - Cnt.setCurrentRegionCount( - addCounters(addCounters(Cnt.getParentCount(), Cnt.getAdjustedCount()), - BC.ContinueCount)); - beginSourceRegionGroup(S->getCond()); - Visit(S->getCond()); - endSourceRegionGroup(); - Cnt.adjustForControlFlow(); - } - Cnt.applyAdjustmentsToRegion(addCounters(BC.BreakCount, BC.ContinueCount)); + Counter OutCount = + addCounters(BC.BreakCount, subtractCounters(CondCount, BodyCount)); + if (OutCount != ParentCount) + pushRegion(OutCount); } void VisitCXXForRangeStmt(const CXXForRangeStmt *S) { - mapSourceCodeRange(S->getLocStart()); + extendRegion(S); + Visit(S->getLoopVarStmt()); Visit(S->getRangeStmt()); - Visit(S->getBeginEndStmt()); - // Counter tracks the body of the loop. - RegionMapper Cnt(this, S); + + Counter ParentCount = getRegion().getCounter(); + Counter BodyCount = getRegionCounter(S); + BreakContinueStack.push_back(BreakContinue()); - // Visit the body region first. (This is basically the same as a while - // loop; see further comments in VisitWhileStmt.) 
- Cnt.beginRegion(); - Visit(S->getBody()); - Cnt.adjustForControlFlow(); + extendRegion(S->getBody()); + Counter BackedgeCount = propagateCounts(BodyCount, S->getBody()); BreakContinue BC = BreakContinueStack.pop_back_val(); - Cnt.applyAdjustmentsToRegion(addCounters(BC.BreakCount, BC.ContinueCount)); + + Counter LoopCount = + addCounters(ParentCount, BackedgeCount, BC.ContinueCount); + Counter OutCount = + addCounters(BC.BreakCount, subtractCounters(LoopCount, BodyCount)); + if (OutCount != ParentCount) + pushRegion(OutCount); } void VisitObjCForCollectionStmt(const ObjCForCollectionStmt *S) { - mapSourceCodeRange(S->getLocStart()); + extendRegion(S); Visit(S->getElement()); - // Counter tracks the body of the loop. - RegionMapper Cnt(this, S); + + Counter ParentCount = getRegion().getCounter(); + Counter BodyCount = getRegionCounter(S); + BreakContinueStack.push_back(BreakContinue()); - Cnt.beginRegion(); - Visit(S->getBody()); + extendRegion(S->getBody()); + Counter BackedgeCount = propagateCounts(BodyCount, S->getBody()); BreakContinue BC = BreakContinueStack.pop_back_val(); - Cnt.adjustForControlFlow(); - Cnt.applyAdjustmentsToRegion(addCounters(BC.BreakCount, BC.ContinueCount)); + + Counter LoopCount = + addCounters(ParentCount, BackedgeCount, BC.ContinueCount); + Counter OutCount = + addCounters(BC.BreakCount, subtractCounters(LoopCount, BodyCount)); + if (OutCount != ParentCount) + pushRegion(OutCount); } void VisitSwitchStmt(const SwitchStmt *S) { - mapSourceCodeRange(S->getLocStart()); + extendRegion(S); Visit(S->getCond()); + BreakContinueStack.push_back(BreakContinue()); - // Map the '}' for the body to have the same count as the regions after - // the switch. - SourceLocation RBracLoc; - if (const auto *CS = dyn_cast<CompoundStmt>(S->getBody())) { - mapSourceCodeRange(CS->getLBracLoc()); - setCurrentRegionUnreachable(S); - for (Stmt::const_child_range I = CS->children(); I; ++I) { - if (*I) - this->Visit(*I); + + const Stmt *Body = S->getBody(); + extendRegion(Body); + if (const auto *CS = dyn_cast<CompoundStmt>(Body)) { + if (!CS->body_empty()) { + // The body of the switch needs a zero region so that fallthrough counts + // behave correctly, but it would be misleading to include the braces of + // the compound statement in the zeroed area, so we need to handle this + // specially. + size_t Index = + pushRegion(Counter::getZero(), getStart(CS->body_front()), + getEnd(CS->body_back())); + for (const auto *Child : CS->children()) + Visit(Child); + popRegions(Index); } - RBracLoc = CS->getRBracLoc(); - } else { - setCurrentRegionUnreachable(S); - Visit(S->getBody()); - } - // If the switch is inside a loop, add the continue counts. + } else + propagateCounts(Counter::getZero(), Body); BreakContinue BC = BreakContinueStack.pop_back_val(); + if (!BreakContinueStack.empty()) BreakContinueStack.back().ContinueCount = addCounters( BreakContinueStack.back().ContinueCount, BC.ContinueCount); - // Counter tracks the exit block of the switch. - RegionMapper ExitCnt(this, S); - ExitCnt.beginRegion(); - if (RBracLoc.isValid()) - mapSourceCodeRange(RBracLoc); - } - void VisitCaseStmt(const CaseStmt *S) { - // Counter for this particular case. This counts only jumps from the - // switch header and does not include fallthrough from the case before - // this one. 
- RegionMapper Cnt(this, S); - Cnt.beginRegion(/*AddIncomingFallThrough=*/true); - mapSourceCodeRange(S->getLocStart()); - mapToken(S->getColonLoc()); - Visit(S->getSubStmt()); + Counter ExitCount = getRegionCounter(S); + pushRegion(ExitCount); } - void VisitDefaultStmt(const DefaultStmt *S) { - // Counter for this default case. This does not include fallthrough from - // the previous case. - RegionMapper Cnt(this, S); - Cnt.beginRegion(/*AddIncomingFallThrough=*/true); - mapSourceCodeRange(S->getLocStart()); - mapToken(S->getColonLoc()); + void VisitSwitchCase(const SwitchCase *S) { + extendRegion(S); + + SourceMappingRegion &Parent = getRegion(); + + Counter Count = addCounters(Parent.getCounter(), getRegionCounter(S)); + // Reuse the existing region if it starts at our label. This is typical of + // the first case in a switch. + if (Parent.hasStartLoc() && Parent.getStartLoc() == getStart(S)) + Parent.setCounter(Count); + else + pushRegion(Count, getStart(S)); + + if (const CaseStmt *CS = dyn_cast<CaseStmt>(S)) { + Visit(CS->getLHS()); + if (const Expr *RHS = CS->getRHS()) + Visit(RHS); + } Visit(S->getSubStmt()); } void VisitIfStmt(const IfStmt *S) { - mapSourceCodeRange(S->getLocStart()); - Visit(S->getCond()); - mapToken(S->getElseLoc()); - - // Counter tracks the "then" part of an if statement. The count for - // the "else" part, if it exists, will be calculated from this counter. - RegionMapper Cnt(this, S); - Cnt.beginRegion(); - Visit(S->getThen()); - Cnt.adjustForControlFlow(); - - if (S->getElse()) { - Cnt.beginElseRegion(); - Visit(S->getElse()); - Cnt.adjustForControlFlow(); - } - Cnt.applyAdjustmentsToRegion(); + extendRegion(S); + + Counter ParentCount = getRegion().getCounter(); + Counter ThenCount = getRegionCounter(S); + + // Emitting a counter for the condition makes it easier to interpret the + // counter for the body when looking at the coverage. + propagateCounts(ParentCount, S->getCond()); + + extendRegion(S->getThen()); + Counter OutCount = propagateCounts(ThenCount, S->getThen()); + + Counter ElseCount = subtractCounters(ParentCount, ThenCount); + if (const Stmt *Else = S->getElse()) { + extendRegion(S->getElse()); + OutCount = addCounters(OutCount, propagateCounts(ElseCount, Else)); + } else + OutCount = addCounters(OutCount, ElseCount); + + if (OutCount != ParentCount) + pushRegion(OutCount); } void VisitCXXTryStmt(const CXXTryStmt *S) { - mapSourceCodeRange(S->getLocStart()); + extendRegion(S); Visit(S->getTryBlock()); for (unsigned I = 0, E = S->getNumHandlers(); I < E; ++I) Visit(S->getHandler(I)); - // Counter tracks the continuation block of the try statement. - RegionMapper Cnt(this, S); - Cnt.beginRegion(); + + Counter ExitCount = getRegionCounter(S); + pushRegion(ExitCount); } void VisitCXXCatchStmt(const CXXCatchStmt *S) { - mapSourceCodeRange(S->getLocStart()); - // Counter tracks the catch statement's handler block. - RegionMapper Cnt(this, S); - Cnt.beginRegion(); - Visit(S->getHandlerBlock()); + extendRegion(S); + propagateCounts(getRegionCounter(S), S->getHandlerBlock()); } void VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) { - Visit(E->getCond()); - mapToken(E->getQuestionLoc()); - mapToken(E->getColonLoc()); + extendRegion(E); - // Counter tracks the "true" part of a conditional operator. The - // count in the "false" part will be calculated from this counter. 
- RegionMapper Cnt(this, E); - Cnt.beginRegion(); - Visit(E->getTrueExpr()); - Cnt.adjustForControlFlow(); + Counter ParentCount = getRegion().getCounter(); + Counter TrueCount = getRegionCounter(E); - Cnt.beginElseRegion(); - Visit(E->getFalseExpr()); - Cnt.adjustForControlFlow(); + Visit(E->getCond()); - Cnt.applyAdjustmentsToRegion(); + if (!isa<BinaryConditionalOperator>(E)) { + extendRegion(E->getTrueExpr()); + propagateCounts(TrueCount, E->getTrueExpr()); + } + extendRegion(E->getFalseExpr()); + propagateCounts(subtractCounters(ParentCount, TrueCount), + E->getFalseExpr()); } void VisitBinLAnd(const BinaryOperator *E) { + extendRegion(E); Visit(E->getLHS()); - mapToken(E->getOperatorLoc()); - // Counter tracks the right hand side of a logical and operator. - RegionMapper Cnt(this, E); - Cnt.beginRegion(); - Visit(E->getRHS()); - Cnt.adjustForControlFlow(); - Cnt.applyAdjustmentsToRegion(); - } - - void VisitBinLOr(const BinaryOperator *E) { - Visit(E->getLHS()); - mapToken(E->getOperatorLoc()); - // Counter tracks the right hand side of a logical or operator. - RegionMapper Cnt(this, E); - Cnt.beginRegion(); - Visit(E->getRHS()); - Cnt.adjustForControlFlow(); - Cnt.applyAdjustmentsToRegion(); - } - void VisitParenExpr(const ParenExpr *E) { - mapToken(E->getLParen()); - Visit(E->getSubExpr()); - mapToken(E->getRParen()); + extendRegion(E->getRHS()); + propagateCounts(getRegionCounter(E), E->getRHS()); } - void VisitBinaryOperator(const BinaryOperator *E) { - Visit(E->getLHS()); - mapToken(E->getOperatorLoc()); - Visit(E->getRHS()); - } - - void VisitUnaryOperator(const UnaryOperator *E) { - bool Postfix = E->isPostfix(); - if (!Postfix) - mapToken(E->getOperatorLoc()); - Visit(E->getSubExpr()); - if (Postfix) - mapToken(E->getOperatorLoc()); - } - - void VisitMemberExpr(const MemberExpr *E) { - Visit(E->getBase()); - mapToken(E->getMemberLoc()); - } - - void VisitCallExpr(const CallExpr *E) { - Visit(E->getCallee()); - for (const auto &Arg : E->arguments()) - Visit(Arg); - mapToken(E->getRParenLoc()); - } - - void VisitArraySubscriptExpr(const ArraySubscriptExpr *E) { + void VisitBinLOr(const BinaryOperator *E) { + extendRegion(E); Visit(E->getLHS()); - Visit(E->getRHS()); - mapToken(E->getRBracketLoc()); - } - - void VisitCStyleCastExpr(const CStyleCastExpr *E) { - mapToken(E->getLParenLoc()); - mapToken(E->getRParenLoc()); - Visit(E->getSubExpr()); - } - - // Map literals as tokens so that the macros like #define PI 3.14 - // won't generate coverage mapping regions. - void VisitIntegerLiteral(const IntegerLiteral *E) { - mapToken(E->getLocStart()); + extendRegion(E->getRHS()); + propagateCounts(getRegionCounter(E), E->getRHS()); } - void VisitFloatingLiteral(const FloatingLiteral *E) { - mapToken(E->getLocStart()); - } - - void VisitCharacterLiteral(const CharacterLiteral *E) { - mapToken(E->getLocStart()); - } - - void VisitStringLiteral(const StringLiteral *E) { - mapToken(E->getLocStart()); - } - - void VisitImaginaryLiteral(const ImaginaryLiteral *E) { - mapToken(E->getLocStart()); - } - - void VisitObjCMessageExpr(const ObjCMessageExpr *E) { - mapToken(E->getLeftLoc()); - for (Stmt::const_child_range I = static_cast<const Stmt*>(E)->children(); I; - ++I) { - if (*I) - this->Visit(*I); - } - mapToken(E->getRightLoc()); + void VisitLambdaExpr(const LambdaExpr *LE) { + // Lambdas are treated as their own functions for now, so we shouldn't + // propagate counts into them. 
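+    // (The lambda's body is instead visited when its own operator() is
+    // emitted, at which point it receives a coverage mapping of its own.)
+    //
+    // As a worked example of the counter algebra shared by the loop visitors
+    // above, take `while (c) body;` with hypothetical counts: the loop is
+    // entered 3 times, the body runs 10 times, and one of those runs ends in
+    // a `break`. Then BodyCount = 10 and BackedgeCount = 9 (the break never
+    // reaches the backedge), so CondCount = 3 + 9 + 0 = 12 and
+    // OutCount = 1 + (12 - 10) = 3, matching the 3 entries: every entry to
+    // the loop eventually exits it.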
  }
};
}

@@ -1009,10 +891,12 @@ static StringRef getCoverageSection(const CodeGenModule &CGM) {
   return isMachO(CGM) ? "__DATA,__llvm_covmap" : "__llvm_covmap";
 }
 
-static void dump(llvm::raw_ostream &OS, const CoverageMappingRecord &Function) {
-  OS << Function.FunctionName << ":\n";
-  CounterMappingContext Ctx(Function.Expressions);
-  for (const auto &R : Function.MappingRegions) {
+static void dump(llvm::raw_ostream &OS, StringRef FunctionName,
+                 ArrayRef<CounterExpression> Expressions,
+                 ArrayRef<CounterMappingRegion> Regions) {
+  OS << FunctionName << ":\n";
+  CounterMappingContext Ctx(Expressions);
+  for (const auto &R : Regions) {
     OS.indent(2);
     switch (R.Kind) {
     case CounterMappingRegion::CodeRegion:
@@ -1025,15 +909,12 @@ static void dump(llvm::raw_ostream &OS, const CoverageMappingRecord &Function) {
       break;
     }
 
-    OS << "File " << R.FileID << ", " << R.LineStart << ":"
-       << R.ColumnStart << " -> " << R.LineEnd << ":" << R.ColumnEnd
-       << " = ";
-    Ctx.dump(R.Count);
-    OS << " (HasCodeBefore = " << R.HasCodeBefore;
+    OS << "File " << R.FileID << ", " << R.LineStart << ":" << R.ColumnStart
+       << " -> " << R.LineEnd << ":" << R.ColumnEnd << " = ";
+    Ctx.dump(R.Count, OS);
     if (R.Kind == CounterMappingRegion::ExpansionRegion)
-      OS << ", Expanded file = " << R.ExpandedFileID;
-
-    OS << ")\n";
+      OS << " (Expanded file = " << R.ExpandedFileID << ")";
+    OS << "\n";
   }
 }
 
@@ -1072,13 +953,11 @@ void CoverageMappingModuleGen::addFunctionMappingRecord(
     FilenameRefs.resize(FileEntries.size());
     for (const auto &Entry : FileEntries)
       FilenameRefs[Entry.second] = Entry.first->getName();
-    RawCoverageMappingReader Reader(FunctionNameValue, CoverageMapping,
-                                    FilenameRefs,
-                                    Filenames, Expressions, Regions);
-    CoverageMappingRecord FunctionRecord;
-    if (Reader.read(FunctionRecord))
+    RawCoverageMappingReader Reader(CoverageMapping, FilenameRefs, Filenames,
+                                    Expressions, Regions);
+    if (Reader.read())
       return;
-    dump(llvm::outs(), FunctionRecord);
+    dump(llvm::outs(), FunctionNameValue, Expressions, Regions);
   }
 }
 
@@ -1098,7 +977,7 @@ void CoverageMappingModuleGen::emit() {
     llvm::sys::fs::make_absolute(Path);
 
     auto I = Entry.second;
-    FilenameStrs[I] = std::move(std::string(Path.begin(), Path.end()));
+    FilenameStrs[I] = std::string(Path.begin(), Path.end());
     FilenameRefs[I] = FilenameStrs[I];
   }
 
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/EHScopeStack.h b/contrib/llvm/tools/clang/lib/CodeGen/EHScopeStack.h
index e695848..a795188 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/EHScopeStack.h
+++ b/contrib/llvm/tools/clang/lib/CodeGen/EHScopeStack.h
@@ -17,6 +17,7 @@
 #define LLVM_CLANG_LIB_CODEGEN_EHSCOPESTACK_H
 
 #include "clang/Basic/LLVM.h"
+#include "llvm/ADT/STLExtras.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/IR/BasicBlock.h"
 #include "llvm/IR/Instructions.h"
@@ -75,8 +76,14 @@ template <class T> struct DominatingPointer<T,false> : InvariantValue<T*> {};
 template <class T> struct DominatingValue<T*> : DominatingPointer<T> {};
 
 enum CleanupKind : unsigned {
+  /// Denotes a cleanup that should run when a scope is exited using exceptional
+  /// control flow (a throw statement leading to stack unwinding, ...).
   EHCleanup = 0x1,
+
+  /// Denotes a cleanup that should run when a scope is exited using normal
+  /// control flow (falling off the end of the scope, return, goto, ...).
   NormalCleanup = 0x2,
+
   NormalAndEHCleanup = EHCleanup | NormalCleanup,
 
   InactiveCleanup = 0x4,
@@ -175,84 +182,28 @@ public:
     virtual void Emit(CodeGenFunction &CGF, Flags flags) = 0;
   };
 
-  /// ConditionalCleanupN stores the saved form of its N parameters,
+  /// ConditionalCleanup stores the saved form of its parameters,
   /// then restores them and performs the cleanup.
-  template <class T, class A0>
-  class ConditionalCleanup1 : public Cleanup {
-    typedef typename DominatingValue<A0>::saved_type A0_saved;
-    A0_saved a0_saved;
-
-    void Emit(CodeGenFunction &CGF, Flags flags) override {
-      A0 a0 = DominatingValue<A0>::restore(CGF, a0_saved);
-      T(a0).Emit(CGF, flags);
-    }
-
-  public:
-    ConditionalCleanup1(A0_saved a0)
-      : a0_saved(a0) {}
-  };
-
-  template <class T, class A0, class A1>
-  class ConditionalCleanup2 : public Cleanup {
-    typedef typename DominatingValue<A0>::saved_type A0_saved;
-    typedef typename DominatingValue<A1>::saved_type A1_saved;
-    A0_saved a0_saved;
-    A1_saved a1_saved;
-
-    void Emit(CodeGenFunction &CGF, Flags flags) override {
-      A0 a0 = DominatingValue<A0>::restore(CGF, a0_saved);
-      A1 a1 = DominatingValue<A1>::restore(CGF, a1_saved);
-      T(a0, a1).Emit(CGF, flags);
+  template <class T, class... As> class ConditionalCleanup : public Cleanup {
+    typedef std::tuple<typename DominatingValue<As>::saved_type...> SavedTuple;
+    SavedTuple Saved;
+
+    template <std::size_t... Is>
+    T restore(CodeGenFunction &CGF, llvm::index_sequence<Is...>) {
+      // It's important that the restores are emitted in order. The braced init
+      // list guarantees that.
+      return T{DominatingValue<As>::restore(CGF, std::get<Is>(Saved))...};
     }
 
-  public:
-    ConditionalCleanup2(A0_saved a0, A1_saved a1)
-      : a0_saved(a0), a1_saved(a1) {}
-  };
-
-  template <class T, class A0, class A1, class A2>
-  class ConditionalCleanup3 : public Cleanup {
-    typedef typename DominatingValue<A0>::saved_type A0_saved;
-    typedef typename DominatingValue<A1>::saved_type A1_saved;
-    typedef typename DominatingValue<A2>::saved_type A2_saved;
-    A0_saved a0_saved;
-    A1_saved a1_saved;
-    A2_saved a2_saved;
-
     void Emit(CodeGenFunction &CGF, Flags flags) override {
-      A0 a0 = DominatingValue<A0>::restore(CGF, a0_saved);
-      A1 a1 = DominatingValue<A1>::restore(CGF, a1_saved);
-      A2 a2 = DominatingValue<A2>::restore(CGF, a2_saved);
-      T(a0, a1, a2).Emit(CGF, flags);
+      restore(CGF, llvm::index_sequence_for<As...>()).Emit(CGF, flags);
     }
 
   public:
-    ConditionalCleanup3(A0_saved a0, A1_saved a1, A2_saved a2)
-      : a0_saved(a0), a1_saved(a1), a2_saved(a2) {}
-  };
-
-  template <class T, class A0, class A1, class A2, class A3>
-  class ConditionalCleanup4 : public Cleanup {
-    typedef typename DominatingValue<A0>::saved_type A0_saved;
-    typedef typename DominatingValue<A1>::saved_type A1_saved;
-    typedef typename DominatingValue<A2>::saved_type A2_saved;
-    typedef typename DominatingValue<A3>::saved_type A3_saved;
-    A0_saved a0_saved;
-    A1_saved a1_saved;
-    A2_saved a2_saved;
-    A3_saved a3_saved;
+    ConditionalCleanup(typename DominatingValue<As>::saved_type... A)
+        : Saved(A...)
{} - void Emit(CodeGenFunction &CGF, Flags flags) override { - A0 a0 = DominatingValue<A0>::restore(CGF, a0_saved); - A1 a1 = DominatingValue<A1>::restore(CGF, a1_saved); - A2 a2 = DominatingValue<A2>::restore(CGF, a2_saved); - A3 a3 = DominatingValue<A3>::restore(CGF, a3_saved); - T(a0, a1, a2, a3).Emit(CGF, flags); - } - - public: - ConditionalCleanup4(A0_saved a0, A1_saved a1, A2_saved a2, A3_saved a3) - : a0_saved(a0), a1_saved(a1), a2_saved(a2), a3_saved(a3) {} + ConditionalCleanup(SavedTuple Tuple) : Saved(std::move(Tuple)) {} }; private: @@ -306,53 +257,18 @@ public: InnermostEHScope(stable_end()) {} ~EHScopeStack() { delete[] StartOfBuffer; } - // Variadic templates would make this not terrible. - /// Push a lazily-created cleanup on the stack. - template <class T> - void pushCleanup(CleanupKind Kind) { + template <class T, class... As> void pushCleanup(CleanupKind Kind, As... A) { void *Buffer = pushCleanup(Kind, sizeof(T)); - Cleanup *Obj = new(Buffer) T(); + Cleanup *Obj = new (Buffer) T(A...); (void) Obj; } - /// Push a lazily-created cleanup on the stack. - template <class T, class A0> - void pushCleanup(CleanupKind Kind, A0 a0) { - void *Buffer = pushCleanup(Kind, sizeof(T)); - Cleanup *Obj = new(Buffer) T(a0); - (void) Obj; - } - - /// Push a lazily-created cleanup on the stack. - template <class T, class A0, class A1> - void pushCleanup(CleanupKind Kind, A0 a0, A1 a1) { - void *Buffer = pushCleanup(Kind, sizeof(T)); - Cleanup *Obj = new(Buffer) T(a0, a1); - (void) Obj; - } - - /// Push a lazily-created cleanup on the stack. - template <class T, class A0, class A1, class A2> - void pushCleanup(CleanupKind Kind, A0 a0, A1 a1, A2 a2) { + /// Push a lazily-created cleanup on the stack. Tuple version. + template <class T, class... As> + void pushCleanupTuple(CleanupKind Kind, std::tuple<As...> A) { void *Buffer = pushCleanup(Kind, sizeof(T)); - Cleanup *Obj = new(Buffer) T(a0, a1, a2); - (void) Obj; - } - - /// Push a lazily-created cleanup on the stack. - template <class T, class A0, class A1, class A2, class A3> - void pushCleanup(CleanupKind Kind, A0 a0, A1 a1, A2 a2, A3 a3) { - void *Buffer = pushCleanup(Kind, sizeof(T)); - Cleanup *Obj = new(Buffer) T(a0, a1, a2, a3); - (void) Obj; - } - - /// Push a lazily-created cleanup on the stack. - template <class T, class A0, class A1, class A2, class A3, class A4> - void pushCleanup(CleanupKind Kind, A0 a0, A1 a1, A2 a2, A3 a3, A4 a4) { - void *Buffer = pushCleanup(Kind, sizeof(T)); - Cleanup *Obj = new(Buffer) T(a0, a1, a2, a3, a4); + Cleanup *Obj = new (Buffer) T(std::move(A)); (void) Obj; } @@ -369,10 +285,10 @@ public: /// /// The pointer returned from this method is valid until the cleanup /// stack is modified. - template <class T, class A0, class A1, class A2> - T *pushCleanupWithExtra(CleanupKind Kind, size_t N, A0 a0, A1 a1, A2 a2) { + template <class T, class... As> + T *pushCleanupWithExtra(CleanupKind Kind, size_t N, As... A) { void *Buffer = pushCleanup(Kind, sizeof(T) + T::getExtraSize(N)); - return new (Buffer) T(N, a0, a1, a2); + return new (Buffer) T(N, A...); } void pushCopyOfCleanup(CleanupKind Kind, const void *Cleanup, size_t Size) { @@ -403,6 +319,10 @@ public: /// Pops a terminate handler off the stack. void popTerminate(); + // Returns true iff the current scope is either empty or contains only + // lifetime markers, i.e. no real cleanup code + bool containsOnlyLifetimeMarkers(stable_iterator Old) const; + /// Determines whether the exception-scopes stack is empty. 
bool empty() const { return StartOfData == EndOfBuffer; } diff --git a/contrib/llvm/tools/clang/lib/CodeGen/ItaniumCXXABI.cpp b/contrib/llvm/tools/clang/lib/CodeGen/ItaniumCXXABI.cpp index 9e88b1e..0a1a4ce 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/ItaniumCXXABI.cpp +++ b/contrib/llvm/tools/clang/lib/CodeGen/ItaniumCXXABI.cpp @@ -19,14 +19,18 @@ //===----------------------------------------------------------------------===// #include "CGCXXABI.h" +#include "CGCleanup.h" #include "CGRecordLayout.h" #include "CGVTables.h" #include "CodeGenFunction.h" #include "CodeGenModule.h" +#include "TargetInfo.h" #include "clang/AST/Mangle.h" #include "clang/AST/Type.h" +#include "clang/AST/StmtCXX.h" #include "llvm/IR/CallSite.h" #include "llvm/IR/DataLayout.h" +#include "llvm/IR/Instructions.h" #include "llvm/IR/Intrinsics.h" #include "llvm/IR/Value.h" @@ -111,10 +115,22 @@ public: const CXXDestructorDecl *Dtor) override; void emitRethrow(CodeGenFunction &CGF, bool isNoReturn) override; + void emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) override; + + void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override; + + llvm::CallInst * + emitTerminateForUnexpectedException(CodeGenFunction &CGF, + llvm::Value *Exn) override; void EmitFundamentalRTTIDescriptor(QualType Type); void EmitFundamentalRTTIDescriptors(); llvm::Constant *getAddrOfRTTIDescriptor(QualType Ty) override; + llvm::Constant * + getAddrOfCXXCatchHandlerType(QualType Ty, + QualType CatchHandlerType) override { + return getAddrOfRTTIDescriptor(Ty); + } bool shouldTypeidBeNullChecked(bool IsDeref, QualType SrcRecordTy) override; void EmitBadTypeidCall(CodeGenFunction &CGF) override; @@ -849,7 +865,7 @@ bool ItaniumCXXABI::classifyReturnType(CGFunctionInfo &FI) const { /// The Itanium ABI requires non-zero initialization only for data /// member pointers, for which '0' is a valid offset. bool ItaniumCXXABI::isZeroInitializable(const MemberPointerType *MPT) { - return MPT->getPointeeType()->isFunctionType(); + return MPT->isMemberFunctionPointer(); } /// The Itanium ABI always places an offset to the complete object @@ -906,6 +922,59 @@ void ItaniumCXXABI::emitRethrow(CodeGenFunction &CGF, bool isNoReturn) { CGF.EmitRuntimeCallOrInvoke(Fn); } +static llvm::Constant *getAllocateExceptionFn(CodeGenModule &CGM) { + // void *__cxa_allocate_exception(size_t thrown_size); + + llvm::FunctionType *FTy = + llvm::FunctionType::get(CGM.Int8PtrTy, CGM.SizeTy, /*IsVarArgs=*/false); + + return CGM.CreateRuntimeFunction(FTy, "__cxa_allocate_exception"); +} + +static llvm::Constant *getThrowFn(CodeGenModule &CGM) { + // void __cxa_throw(void *thrown_exception, std::type_info *tinfo, + // void (*dest) (void *)); + + llvm::Type *Args[3] = { CGM.Int8PtrTy, CGM.Int8PtrTy, CGM.Int8PtrTy }; + llvm::FunctionType *FTy = + llvm::FunctionType::get(CGM.VoidTy, Args, /*IsVarArgs=*/false); + + return CGM.CreateRuntimeFunction(FTy, "__cxa_throw"); +} + +void ItaniumCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) { + QualType ThrowType = E->getSubExpr()->getType(); + // Now allocate the exception object. 
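+  // For intuition: once lowered, `throw 42;` amounts to the hand-written
+  // sequence below against the Itanium C++ ABI entry points (a sketch; `int`
+  // has a trivial destructor, so the cleanup hook handed to __cxa_throw is
+  // null):
+  //
+  //   void *mem = __cxa_allocate_exception(sizeof(int));
+  //   *static_cast<int *>(mem) = 42;
+  //   __cxa_throw(mem, const_cast<std::type_info *>(&typeid(int)), nullptr);
+  //
+  // The code that follows emits the same shape for the general case.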
+ llvm::Type *SizeTy = CGF.ConvertType(getContext().getSizeType()); + uint64_t TypeSize = getContext().getTypeSizeInChars(ThrowType).getQuantity(); + + llvm::Constant *AllocExceptionFn = getAllocateExceptionFn(CGM); + llvm::CallInst *ExceptionPtr = CGF.EmitNounwindRuntimeCall( + AllocExceptionFn, llvm::ConstantInt::get(SizeTy, TypeSize), "exception"); + + CGF.EmitAnyExprToExn(E->getSubExpr(), ExceptionPtr); + + // Now throw the exception. + llvm::Constant *TypeInfo = CGM.GetAddrOfRTTIDescriptor(ThrowType, + /*ForEH=*/true); + + // The address of the destructor. If the exception type has a + // trivial destructor (or isn't a record), we just pass null. + llvm::Constant *Dtor = nullptr; + if (const RecordType *RecordTy = ThrowType->getAs<RecordType>()) { + CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordTy->getDecl()); + if (!Record->hasTrivialDestructor()) { + CXXDestructorDecl *DtorD = Record->getDestructor(); + Dtor = CGM.getAddrOfCXXStructor(DtorD, StructorType::Complete); + Dtor = llvm::ConstantExpr::getBitCast(Dtor, CGM.Int8PtrTy); + } + } + if (!Dtor) Dtor = llvm::Constant::getNullValue(CGM.Int8PtrTy); + + llvm::Value *args[] = { ExceptionPtr, TypeInfo, Dtor }; + CGF.EmitNoreturnRuntimeCallOrInvoke(getThrowFn(CGM), args); +} + static llvm::Constant *getItaniumDynamicCastFn(CodeGenFunction &CGF) { // void *__dynamic_cast(const void *sub, // const abi::__class_type_info *src, @@ -1259,6 +1328,9 @@ void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT, // Set the correct linkage. VTable->setLinkage(Linkage); + if (CGM.supportsCOMDAT() && VTable->isWeakForLinker()) + VTable->setComdat(CGM.getModule().getOrInsertComdat(VTable->getName())); + // Set the right visibility. CGM.setGlobalVisibility(VTable, RD); @@ -1278,6 +1350,8 @@ void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT, cast<NamespaceDecl>(DC)->getIdentifier()->isStr("__cxxabiv1") && DC->getParent()->isTranslationUnit()) EmitFundamentalRTTIDescriptors(); + + CGM.EmitVTableBitSetEntries(VTable, VTLayout); } llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructor( @@ -1314,7 +1388,7 @@ llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructor( llvm::Constant *ItaniumCXXABI::getVTableAddressPointForConstExpr( BaseSubobject Base, const CXXRecordDecl *VTableClass) { - llvm::Constant *VTable = getAddrOfVTable(VTableClass, CharUnits()); + auto *VTable = getAddrOfVTable(VTableClass, CharUnits()); // Find the appropriate vtable within the vtable group. 
uint64_t AddressPoint = CGM.getItaniumVTableContext() @@ -1325,7 +1399,8 @@ llvm::Constant *ItaniumCXXABI::getVTableAddressPointForConstExpr( llvm::ConstantInt::get(CGM.Int64Ty, AddressPoint) }; - return llvm::ConstantExpr::getInBoundsGetElementPtr(VTable, Indices); + return llvm::ConstantExpr::getInBoundsGetElementPtr(VTable->getValueType(), + VTable, Indices); } llvm::GlobalVariable *ItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD, @@ -1369,6 +1444,9 @@ llvm::Value *ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF, Ty = Ty->getPointerTo()->getPointerTo(); llvm::Value *VTable = CGF.GetVTablePtr(This, Ty); + if (CGF.SanOpts.has(SanitizerKind::CFIVCall)) + CGF.EmitVTablePtrCheckForCall(cast<CXXMethodDecl>(GD.getDecl()), VTable); + uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD); llvm::Value *VFuncPtr = CGF.Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfn"); @@ -1591,7 +1669,7 @@ llvm::Value *ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF, CGF.Builder.CreateStore(elementSize, cookie); // The second element is the element count. - cookie = CGF.Builder.CreateConstInBoundsGEP1_32(cookie, 1); + cookie = CGF.Builder.CreateConstInBoundsGEP1_32(CGF.SizeTy, cookie, 1); CGF.Builder.CreateStore(numElements, cookie); // Finally, compute a pointer to the actual data buffer by skipping @@ -1714,11 +1792,12 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF, // The ABI says: It is suggested that it be emitted in the same COMDAT group // as the associated data object - if (!D.isLocalVarDecl() && var->isWeakForLinker() && CGM.supportsCOMDAT()) { - llvm::Comdat *C = CGM.getModule().getOrInsertComdat(var->getName()); + llvm::Comdat *C = var->getComdat(); + if (!D.isLocalVarDecl() && C) { guard->setComdat(C); - var->setComdat(C); CGF.CurFn->setComdat(C); + } else if (CGM.supportsCOMDAT() && guard->isWeakForLinker()) { + guard->setComdat(CGM.getModule().getOrInsertComdat(guard->getName())); } CGM.setStaticLocalDeclGuardAddress(&D, guard); @@ -2011,7 +2090,7 @@ void ItaniumCXXABI::EmitThreadLocalInitFuncs( CGBuilderTy Builder(Entry); if (InitIsInitFunc) { if (Init) - Builder.CreateCall(Init); + Builder.CreateCall(Init, {}); } else { // Don't know whether we have an init function. Call it if it exists. llvm::Value *Have = Builder.CreateIsNotNull(Init); @@ -2020,7 +2099,7 @@ void ItaniumCXXABI::EmitThreadLocalInitFuncs( Builder.CreateCondBr(Have, InitBB, ExitBB); Builder.SetInsertPoint(InitBB); - Builder.CreateCall(Init); + Builder.CreateCall(Init, {}); Builder.CreateBr(ExitBB); Builder.SetInsertPoint(ExitBB); @@ -2049,7 +2128,7 @@ LValue ItaniumCXXABI::EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, llvm::Value *Val = CGF.CGM.GetAddrOfGlobalVar(VD, Ty); llvm::Function *Wrapper = getOrCreateThreadLocalWrapper(VD, Val); - Val = CGF.Builder.CreateCall(Wrapper); + Val = CGF.Builder.CreateCall(Wrapper, {}); LValue LV; if (VD->getType()->isReferenceType()) @@ -2528,7 +2607,8 @@ void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty) { // The vtable address point is 2. 
llvm::Constant *Two = llvm::ConstantInt::get(PtrDiffTy, 2); - VTable = llvm::ConstantExpr::getInBoundsGetElementPtr(VTable, Two); + VTable = + llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8PtrTy, VTable, Two); VTable = llvm::ConstantExpr::getBitCast(VTable, CGM.Int8PtrTy); Fields.push_back(VTable); @@ -2718,9 +2798,13 @@ llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(QualType Ty, bool Force) { llvm::Constant *Init = llvm::ConstantStruct::getAnon(Fields); + llvm::Module &M = CGM.getModule(); llvm::GlobalVariable *GV = - new llvm::GlobalVariable(CGM.getModule(), Init->getType(), - /*Constant=*/true, Linkage, Init, Name); + new llvm::GlobalVariable(M, Init->getType(), + /*Constant=*/true, Linkage, Init, Name); + + if (CGM.supportsCOMDAT() && GV->isWeakForLinker()) + GV->setComdat(M.getOrInsertComdat(GV->getName())); // If there's already an old global variable, replace it with the new one. if (OldGV) { @@ -3136,8 +3220,8 @@ static void emitConstructorDestructorAlias(CodeGenModule &CGM, llvm::PointerType *AliasType = Aliasee->getType(); // Create the alias with no name. - auto *Alias = llvm::GlobalAlias::create( - AliasType->getElementType(), 0, Linkage, "", Aliasee, &CGM.getModule()); + auto *Alias = llvm::GlobalAlias::create(AliasType, Linkage, "", Aliasee, + &CGM.getModule()); // Switch any previous uses to the alias. if (Entry) { @@ -3204,5 +3288,352 @@ void ItaniumCXXABI::emitCXXStructor(const CXXMethodDecl *MD, getMangleContext().mangleCXXCtorComdat(CD, Out); llvm::Comdat *C = CGM.getModule().getOrInsertComdat(Out.str()); Fn->setComdat(C); + } else { + CGM.maybeSetTrivialComdat(*MD, *Fn); + } +} + +static llvm::Constant *getBeginCatchFn(CodeGenModule &CGM) { + // void *__cxa_begin_catch(void*); + llvm::FunctionType *FTy = llvm::FunctionType::get( + CGM.Int8PtrTy, CGM.Int8PtrTy, /*IsVarArgs=*/false); + + return CGM.CreateRuntimeFunction(FTy, "__cxa_begin_catch"); +} + +static llvm::Constant *getEndCatchFn(CodeGenModule &CGM) { + // void __cxa_end_catch(); + llvm::FunctionType *FTy = + llvm::FunctionType::get(CGM.VoidTy, /*IsVarArgs=*/false); + + return CGM.CreateRuntimeFunction(FTy, "__cxa_end_catch"); +} + +static llvm::Constant *getGetExceptionPtrFn(CodeGenModule &CGM) { + // void *__cxa_get_exception_ptr(void*); + llvm::FunctionType *FTy = llvm::FunctionType::get( + CGM.Int8PtrTy, CGM.Int8PtrTy, /*IsVarArgs=*/false); + + return CGM.CreateRuntimeFunction(FTy, "__cxa_get_exception_ptr"); +} + +namespace { + /// A cleanup to call __cxa_end_catch. In many cases, the caught + /// exception type lets us state definitively that the thrown exception + /// type does not have a destructor. In particular: + /// - Catch-alls tell us nothing, so we have to conservatively + /// assume that the thrown exception might have a destructor. + /// - Catches by reference behave according to their base types. + /// - Catches of non-record types will only trigger for exceptions + /// of non-record types, which never have destructors. + /// - Catches of record types can trigger for arbitrary subclasses + /// of the caught type, so we have to assume the actual thrown + /// exception type might have a throwing destructor, even if the + /// caught type's destructor is trivial or nothrow. 
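+  ///   For example, `catch (const Base &b)` can bind an exception of any
+  ///   type derived from Base, and it is the derived type's destructor,
+  ///   which may well throw, that __cxa_end_catch eventually runs.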
+ struct CallEndCatch : EHScopeStack::Cleanup { + CallEndCatch(bool MightThrow) : MightThrow(MightThrow) {} + bool MightThrow; + + void Emit(CodeGenFunction &CGF, Flags flags) override { + if (!MightThrow) { + CGF.EmitNounwindRuntimeCall(getEndCatchFn(CGF.CGM)); + return; + } + + CGF.EmitRuntimeCallOrInvoke(getEndCatchFn(CGF.CGM)); + } + }; +} + +/// Emits a call to __cxa_begin_catch and enters a cleanup to call +/// __cxa_end_catch. +/// +/// \param EndMightThrow - true if __cxa_end_catch might throw +static llvm::Value *CallBeginCatch(CodeGenFunction &CGF, + llvm::Value *Exn, + bool EndMightThrow) { + llvm::CallInst *call = + CGF.EmitNounwindRuntimeCall(getBeginCatchFn(CGF.CGM), Exn); + + CGF.EHStack.pushCleanup<CallEndCatch>(NormalAndEHCleanup, EndMightThrow); + + return call; +} + +/// A "special initializer" callback for initializing a catch +/// parameter during catch initialization. +static void InitCatchParam(CodeGenFunction &CGF, + const VarDecl &CatchParam, + llvm::Value *ParamAddr, + SourceLocation Loc) { + // Load the exception from where the landing pad saved it. + llvm::Value *Exn = CGF.getExceptionFromSlot(); + + CanQualType CatchType = + CGF.CGM.getContext().getCanonicalType(CatchParam.getType()); + llvm::Type *LLVMCatchTy = CGF.ConvertTypeForMem(CatchType); + + // If we're catching by reference, we can just cast the object + // pointer to the appropriate pointer. + if (isa<ReferenceType>(CatchType)) { + QualType CaughtType = cast<ReferenceType>(CatchType)->getPointeeType(); + bool EndCatchMightThrow = CaughtType->isRecordType(); + + // __cxa_begin_catch returns the adjusted object pointer. + llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, EndCatchMightThrow); + + // We have no way to tell the personality function that we're + // catching by reference, so if we're catching a pointer, + // __cxa_begin_catch will actually return that pointer by value. + if (const PointerType *PT = dyn_cast<PointerType>(CaughtType)) { + QualType PointeeType = PT->getPointeeType(); + + // When catching by reference, generally we should just ignore + // this by-value pointer and use the exception object instead. + if (!PointeeType->isRecordType()) { + + // Exn points to the struct _Unwind_Exception header, which + // we have to skip past in order to reach the exception data. + unsigned HeaderSize = + CGF.CGM.getTargetCodeGenInfo().getSizeOfUnwindException(); + AdjustedExn = CGF.Builder.CreateConstGEP1_32(Exn, HeaderSize); + + // However, if we're catching a pointer-to-record type that won't + // work, because the personality function might have adjusted + // the pointer. There's actually no way for us to fully satisfy + // the language/ABI contract here: we can't use Exn because it + // might have the wrong adjustment, but we can't use the by-value + // pointer because it's off by a level of abstraction. + // + // The current solution is to dump the adjusted pointer into an + // alloca, which breaks language semantics (because changing the + // pointer doesn't change the exception) but at least works. + // The better solution would be to filter out non-exact matches + // and rethrow them, but this is tricky because the rethrow + // really needs to be catchable by other sites at this landing + // pad. The best solution is to fix the personality function. + } else { + // Pull the pointer for the reference type off. + llvm::Type *PtrTy = + cast<llvm::PointerType>(LLVMCatchTy)->getElementType(); + + // Create the temporary and write the adjusted pointer into it. 
+ llvm::Value *ExnPtrTmp = CGF.CreateTempAlloca(PtrTy, "exn.byref.tmp"); + llvm::Value *Casted = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy); + CGF.Builder.CreateStore(Casted, ExnPtrTmp); + + // Bind the reference to the temporary. + AdjustedExn = ExnPtrTmp; + } + } + + llvm::Value *ExnCast = + CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.byref"); + CGF.Builder.CreateStore(ExnCast, ParamAddr); + return; + } + + // Scalars and complexes. + TypeEvaluationKind TEK = CGF.getEvaluationKind(CatchType); + if (TEK != TEK_Aggregate) { + llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, false); + + // If the catch type is a pointer type, __cxa_begin_catch returns + // the pointer by value. + if (CatchType->hasPointerRepresentation()) { + llvm::Value *CastExn = + CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.casted"); + + switch (CatchType.getQualifiers().getObjCLifetime()) { + case Qualifiers::OCL_Strong: + CastExn = CGF.EmitARCRetainNonBlock(CastExn); + // fallthrough + + case Qualifiers::OCL_None: + case Qualifiers::OCL_ExplicitNone: + case Qualifiers::OCL_Autoreleasing: + CGF.Builder.CreateStore(CastExn, ParamAddr); + return; + + case Qualifiers::OCL_Weak: + CGF.EmitARCInitWeak(ParamAddr, CastExn); + return; + } + llvm_unreachable("bad ownership qualifier!"); + } + + // Otherwise, it returns a pointer into the exception object. + + llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok + llvm::Value *Cast = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy); + + LValue srcLV = CGF.MakeNaturalAlignAddrLValue(Cast, CatchType); + LValue destLV = CGF.MakeAddrLValue(ParamAddr, CatchType, + CGF.getContext().getDeclAlign(&CatchParam)); + switch (TEK) { + case TEK_Complex: + CGF.EmitStoreOfComplex(CGF.EmitLoadOfComplex(srcLV, Loc), destLV, + /*init*/ true); + return; + case TEK_Scalar: { + llvm::Value *ExnLoad = CGF.EmitLoadOfScalar(srcLV, Loc); + CGF.EmitStoreOfScalar(ExnLoad, destLV, /*init*/ true); + return; + } + case TEK_Aggregate: + llvm_unreachable("evaluation kind filtered out!"); + } + llvm_unreachable("bad evaluation kind"); + } + + assert(isa<RecordType>(CatchType) && "unexpected catch type!"); + + llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok + + // Check for a copy expression. If we don't have a copy expression, + // that means a trivial copy is okay. + const Expr *copyExpr = CatchParam.getInit(); + if (!copyExpr) { + llvm::Value *rawAdjustedExn = CallBeginCatch(CGF, Exn, true); + llvm::Value *adjustedExn = CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy); + CGF.EmitAggregateCopy(ParamAddr, adjustedExn, CatchType); + return; + } + + // We have to call __cxa_get_exception_ptr to get the adjusted + // pointer before copying. + llvm::CallInst *rawAdjustedExn = + CGF.EmitNounwindRuntimeCall(getGetExceptionPtrFn(CGF.CGM), Exn); + + // Cast that to the appropriate type. + llvm::Value *adjustedExn = CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy); + + // The copy expression is defined in terms of an OpaqueValueExpr. + // Find it and map it to the adjusted expression. + CodeGenFunction::OpaqueValueMapping + opaque(CGF, OpaqueValueExpr::findInCopyConstruct(copyExpr), + CGF.MakeAddrLValue(adjustedExn, CatchParam.getType())); + + // Call the copy ctor in a terminate scope. + CGF.EHStack.pushTerminate(); + + // Perform the copy construction. 
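+  // (Should the copy constructor itself throw here, the original exception
+  // can no longer be handled, so the terminate scope entered above turns
+  // such a throw into a call to std::terminate.)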
+  CharUnits Alignment = CGF.getContext().getDeclAlign(&CatchParam);
+  CGF.EmitAggExpr(copyExpr,
+                  AggValueSlot::forAddr(ParamAddr, Alignment, Qualifiers(),
+                                        AggValueSlot::IsNotDestructed,
+                                        AggValueSlot::DoesNotNeedGCBarriers,
+                                        AggValueSlot::IsNotAliased));
+
+  // Leave the terminate scope.
+  CGF.EHStack.popTerminate();
+
+  // Undo the opaque value mapping.
+  opaque.pop();
+
+  // Finally we can call __cxa_begin_catch.
+  CallBeginCatch(CGF, Exn, true);
+}
+
+/// Begins a catch statement by initializing the catch variable and
+/// calling __cxa_begin_catch.
+void ItaniumCXXABI::emitBeginCatch(CodeGenFunction &CGF,
+                                   const CXXCatchStmt *S) {
+  // We have to be very careful with the ordering of cleanups here:
+  //   C++ [except.throw]p4:
+  //     The destruction [of the exception temporary] occurs
+  //     immediately after the destruction of the object declared in
+  //     the exception-declaration in the handler.
+  //
+  // So the precise ordering is:
+  //   1.  Construct catch variable.
+  //   2.  __cxa_begin_catch
+  //   3.  Enter __cxa_end_catch cleanup
+  //   4.  Enter dtor cleanup
+  //
+  // We do this by using a slightly abnormal initialization process.
+  // Delegation sequence:
+  //   - ExitCXXTryStmt opens a RunCleanupsScope
+  //   - EmitAutoVarAlloca creates the variable and debug info
+  //   - InitCatchParam initializes the variable from the exception
+  //   - CallBeginCatch calls __cxa_begin_catch
+  //   - CallBeginCatch enters the __cxa_end_catch cleanup
+  //   - EmitAutoVarCleanups enters the variable destructor cleanup
+  //   - EmitCXXTryStmt emits the code for the catch body
+  //   - EmitCXXTryStmt closes the RunCleanupsScope
+
+  VarDecl *CatchParam = S->getExceptionDecl();
+  if (!CatchParam) {
+    llvm::Value *Exn = CGF.getExceptionFromSlot();
+    CallBeginCatch(CGF, Exn, true);
+    return;
+  }
+
+  // Emit the local.
+  CodeGenFunction::AutoVarEmission var = CGF.EmitAutoVarAlloca(*CatchParam);
+  InitCatchParam(CGF, *CatchParam, var.getObjectAddress(CGF), S->getLocStart());
+  CGF.EmitAutoVarCleanups(var);
+}
+
+/// Get or define the following function:
+///   void @__clang_call_terminate(i8* %exn) nounwind noreturn
+/// This code is used only in C++.
+static llvm::Constant *getClangCallTerminateFn(CodeGenModule &CGM) {
+  llvm::FunctionType *fnTy =
+    llvm::FunctionType::get(CGM.VoidTy, CGM.Int8PtrTy, /*IsVarArgs=*/false);
+  llvm::Constant *fnRef =
+    CGM.CreateRuntimeFunction(fnTy, "__clang_call_terminate");
+
+  llvm::Function *fn = dyn_cast<llvm::Function>(fnRef);
+  if (fn && fn->empty()) {
+    fn->setDoesNotThrow();
+    fn->setDoesNotReturn();
+
+    // What we really want is to massively penalize inlining without
+    // forbidding it completely.  The difference between that and
+    // 'noinline' is negligible.
+    fn->addFnAttr(llvm::Attribute::NoInline);
+
+    // Allow this function to be shared across translation units, but
+    // we don't want it to turn into an exported symbol.
+    fn->setLinkage(llvm::Function::LinkOnceODRLinkage);
+    fn->setVisibility(llvm::Function::HiddenVisibility);
+    if (CGM.supportsCOMDAT())
+      fn->setComdat(CGM.getModule().getOrInsertComdat(fn->getName()));
+
+    // Set up the function.
+    llvm::BasicBlock *entry =
+      llvm::BasicBlock::Create(CGM.getLLVMContext(), "", fn);
+    CGBuilderTy builder(entry);
+
+    // Pull the exception pointer out of the parameter list.
+    llvm::Value *exn = &*fn->arg_begin();
+
+    // Call __cxa_begin_catch(exn).
+    llvm::CallInst *catchCall = builder.CreateCall(getBeginCatchFn(CGM), exn);
+    catchCall->setDoesNotThrow();
+    catchCall->setCallingConv(CGM.getRuntimeCC());
+
+    // Call std::terminate().
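+    // (Catching the exception before terminating means a custom terminate
+    // handler, e.g. a verbose one installed via std::set_terminate, still
+    // has an active handler to inspect and report the exception from.)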
+ llvm::CallInst *termCall = builder.CreateCall(CGM.getTerminateFn(), {}); + termCall->setDoesNotThrow(); + termCall->setDoesNotReturn(); + termCall->setCallingConv(CGM.getRuntimeCC()); + + // std::terminate cannot return. + builder.CreateUnreachable(); + } + + return fnRef; +} + +llvm::CallInst * +ItaniumCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF, + llvm::Value *Exn) { + // In C++, we want to call __cxa_begin_catch() before terminating. + if (Exn) { + assert(CGF.CGM.getLangOpts().CPlusPlus); + return CGF.EmitNounwindRuntimeCall(getClangCallTerminateFn(CGF.CGM), Exn); } + return CGF.EmitNounwindRuntimeCall(CGF.CGM.getTerminateFn()); } diff --git a/contrib/llvm/tools/clang/lib/CodeGen/MicrosoftCXXABI.cpp b/contrib/llvm/tools/clang/lib/CodeGen/MicrosoftCXXABI.cpp index c067fab..de30883 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/MicrosoftCXXABI.cpp +++ b/contrib/llvm/tools/clang/lib/CodeGen/MicrosoftCXXABI.cpp @@ -17,12 +17,16 @@ #include "CGCXXABI.h" #include "CGVTables.h" #include "CodeGenModule.h" +#include "CodeGenTypes.h" +#include "TargetInfo.h" #include "clang/AST/Decl.h" #include "clang/AST/DeclCXX.h" +#include "clang/AST/StmtCXX.h" #include "clang/AST/VTableBuilder.h" #include "llvm/ADT/StringExtras.h" #include "llvm/ADT/StringSet.h" #include "llvm/IR/CallSite.h" +#include "llvm/IR/Intrinsics.h" using namespace clang; using namespace CodeGen; @@ -40,7 +44,8 @@ public: MicrosoftCXXABI(CodeGenModule &CGM) : CGCXXABI(CGM), BaseClassDescriptorType(nullptr), ClassHierarchyDescriptorType(nullptr), - CompleteObjectLocatorType(nullptr) {} + CompleteObjectLocatorType(nullptr), CatchableTypeType(nullptr), + ThrowInfoType(nullptr), CatchHandlerTypeType(nullptr) {} bool HasThisReturn(GlobalDecl GD) const override; bool hasMostDerivedReturn(GlobalDecl GD) const override; @@ -71,11 +76,16 @@ public: const CXXDestructorDecl *Dtor) override; void emitRethrow(CodeGenFunction &CGF, bool isNoReturn) override; + void emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) override; + + void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override; llvm::GlobalVariable *getMSCompleteObjectLocator(const CXXRecordDecl *RD, const VPtrInfo *Info); llvm::Constant *getAddrOfRTTIDescriptor(QualType Ty) override; + llvm::Constant * + getAddrOfCXXCatchHandlerType(QualType Ty, QualType CatchHandlerType) override; bool shouldTypeidBeNullChecked(bool IsDeref, QualType SrcRecordTy) override; void EmitBadTypeidCall(CodeGenFunction &CGF) override; @@ -225,7 +235,7 @@ public: assert(GD.getDtorType() == Dtor_Deleting && "Only deleting destructor thunks are available in this ABI"); CallArgs.add(RValue::get(getStructorImplicitParamValue(CGF)), - CGM.getContext().IntTy); + getContext().IntTy); } void emitVirtualInheritanceTables(const CXXRecordDecl *RD) override; @@ -410,6 +420,9 @@ public: if (!isImageRelative()) return PtrVal; + if (PtrVal->isNullValue()) + return llvm::Constant::getNullValue(CGM.IntTy); + llvm::Constant *ImageBaseAsInt = llvm::ConstantExpr::getPtrToInt(getImageBase(), CGM.IntPtrTy); llvm::Constant *PtrValAsInt = @@ -467,6 +480,10 @@ private: return GetVBaseOffsetFromVBPtr(CGF, Base, VBPOffset, VBTOffset, VBPtr); } + std::pair<llvm::Value *, llvm::Value *> + performBaseAdjustment(CodeGenFunction &CGF, llvm::Value *Value, + QualType SrcRecordTy); + /// \brief Performs a full virtual base adjustment. Used to dereference /// pointers to members of virtual bases. 
llvm::Value *AdjustVirtualBase(CodeGenFunction &CGF, const Expr *E, @@ -479,7 +496,8 @@ private: llvm::Constant *EmitFullMemberPointer(llvm::Constant *FirstField, bool IsMemberFunction, const CXXRecordDecl *RD, - CharUnits NonVirtualBaseAdjustment); + CharUnits NonVirtualBaseAdjustment, + unsigned VBTableIndex); llvm::Constant *BuildMemberPointer(const CXXRecordDecl *RD, const CXXMethodDecl *MD, @@ -556,6 +574,94 @@ public: void emitCXXStructor(const CXXMethodDecl *MD, StructorType Type) override; + llvm::StructType *getCatchHandlerTypeType() { + if (!CatchHandlerTypeType) { + llvm::Type *FieldTypes[] = { + CGM.IntTy, // Flags + CGM.Int8PtrTy, // TypeDescriptor + }; + CatchHandlerTypeType = llvm::StructType::create( + CGM.getLLVMContext(), FieldTypes, "eh.CatchHandlerType"); + } + return CatchHandlerTypeType; + } + + llvm::StructType *getCatchableTypeType() { + if (CatchableTypeType) + return CatchableTypeType; + llvm::Type *FieldTypes[] = { + CGM.IntTy, // Flags + getImageRelativeType(CGM.Int8PtrTy), // TypeDescriptor + CGM.IntTy, // NonVirtualAdjustment + CGM.IntTy, // OffsetToVBPtr + CGM.IntTy, // VBTableIndex + CGM.IntTy, // Size + getImageRelativeType(CGM.Int8PtrTy) // CopyCtor + }; + CatchableTypeType = llvm::StructType::create( + CGM.getLLVMContext(), FieldTypes, "eh.CatchableType"); + return CatchableTypeType; + } + + llvm::StructType *getCatchableTypeArrayType(uint32_t NumEntries) { + llvm::StructType *&CatchableTypeArrayType = + CatchableTypeArrayTypeMap[NumEntries]; + if (CatchableTypeArrayType) + return CatchableTypeArrayType; + + llvm::SmallString<23> CTATypeName("eh.CatchableTypeArray."); + CTATypeName += llvm::utostr(NumEntries); + llvm::Type *CTType = + getImageRelativeType(getCatchableTypeType()->getPointerTo()); + llvm::Type *FieldTypes[] = { + CGM.IntTy, // NumEntries + llvm::ArrayType::get(CTType, NumEntries) // CatchableTypes + }; + CatchableTypeArrayType = + llvm::StructType::create(CGM.getLLVMContext(), FieldTypes, CTATypeName); + return CatchableTypeArrayType; + } + + llvm::StructType *getThrowInfoType() { + if (ThrowInfoType) + return ThrowInfoType; + llvm::Type *FieldTypes[] = { + CGM.IntTy, // Flags + getImageRelativeType(CGM.Int8PtrTy), // CleanupFn + getImageRelativeType(CGM.Int8PtrTy), // ForwardCompat + getImageRelativeType(CGM.Int8PtrTy) // CatchableTypeArray + }; + ThrowInfoType = llvm::StructType::create(CGM.getLLVMContext(), FieldTypes, + "eh.ThrowInfo"); + return ThrowInfoType; + } + + llvm::Constant *getThrowFn() { + // _CxxThrowException is passed an exception object and a ThrowInfo object + // which describes the exception. + llvm::Type *Args[] = {CGM.Int8PtrTy, getThrowInfoType()->getPointerTo()}; + llvm::FunctionType *FTy = + llvm::FunctionType::get(CGM.VoidTy, Args, /*IsVarArgs=*/false); + auto *Fn = cast<llvm::Function>( + CGM.CreateRuntimeFunction(FTy, "_CxxThrowException")); + // _CxxThrowException is stdcall on 32-bit x86 platforms. 
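For reference, the records whose IR struct types are assembled above, written as plain C++ structs; under isImageRelative() each pointer-typed field becomes a 32-bit offset from the image base instead. The CatchableType flag values are the ones used by getCatchableType later in this file; the struct and field names follow the comments in the code, not a published header:

    struct CatchableType {
      unsigned Flags;            // 1: scalar, 4: has virtual bases, 16: std::bad_alloc
      void *TypeDescriptor;      // identifies the type to the runtime
      int NonVirtualAdjustment;  // derived-to-base displacement
      int OffsetToVBPtr;         // where to find the vbtable, if any
      int VBTableIndex;          // which vbtable slot to add
      int Size;                  // size of the catchable object
      void *CopyCtor;            // copy constructor or copying closure
    };

    struct ThrowInfo {
      unsigned Flags;            // cv-qualifiers of the thrown object
      void *CleanupFn;           // destructor for the exception object
      void *ForwardCompat;
      void *CatchableTypeArray;  // the types this exception may be caught as
    };

_CxxThrowException receives the exception object plus a pointer to the ThrowInfo, which is why the second argument type is getThrowInfoType()->getPointerTo(); as the comment above notes, the call is stdcall on 32-bit x86.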
+ if (CGM.getTarget().getTriple().getArch() == llvm::Triple::x86) + Fn->setCallingConv(llvm::CallingConv::X86_StdCall); + return Fn; + } + + llvm::Function *getAddrOfCXXCtorClosure(const CXXConstructorDecl *CD, + CXXCtorType CT); + + llvm::Constant *getCatchableType(QualType T, + uint32_t NVOffset = 0, + int32_t VBPtrOffset = -1, + uint32_t VBIndex = 0); + + llvm::GlobalVariable *getCatchableTypeArray(QualType T); + + llvm::GlobalVariable *getThrowInfo(QualType T) override; + private: typedef std::pair<const CXXRecordDecl *, CharUnits> VFTableIdTy; typedef llvm::DenseMap<VFTableIdTy, llvm::GlobalVariable *> VTablesMapTy; @@ -582,11 +688,20 @@ private: /// Map from DeclContext to the current guard variable. We assume that the /// AST is visited in source code order. llvm::DenseMap<const DeclContext *, GuardInfo> GuardVariableMap; + llvm::DenseMap<const DeclContext *, GuardInfo> ThreadLocalGuardVariableMap; + llvm::DenseMap<const DeclContext *, unsigned> ThreadSafeGuardNumMap; llvm::DenseMap<size_t, llvm::StructType *> TypeDescriptorTypeMap; llvm::StructType *BaseClassDescriptorType; llvm::StructType *ClassHierarchyDescriptorType; llvm::StructType *CompleteObjectLocatorType; + + llvm::DenseMap<QualType, llvm::GlobalVariable *> CatchableTypeArrays; + + llvm::StructType *CatchableTypeType; + llvm::DenseMap<uint32_t, llvm::StructType *> CatchableTypeArrayTypeMap; + llvm::StructType *ThrowInfoType; + llvm::StructType *CatchHandlerTypeType; }; } @@ -667,55 +782,73 @@ void MicrosoftCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF, CGF.EmitDeleteCall(DE->getOperatorDelete(), MDThis, ElementType); } -static llvm::Function *getRethrowFn(CodeGenModule &CGM) { - // _CxxThrowException takes two pointer width arguments: a value and a context - // object which points to a TypeInfo object. - llvm::Type *ArgTypes[] = {CGM.Int8PtrTy, CGM.Int8PtrTy}; - llvm::FunctionType *FTy = - llvm::FunctionType::get(CGM.VoidTy, ArgTypes, false); - auto *Fn = cast<llvm::Function>( - CGM.CreateRuntimeFunction(FTy, "_CxxThrowException")); - // _CxxThrowException is stdcall on 32-bit x86 platforms. - if (CGM.getTarget().getTriple().getArch() == llvm::Triple::x86) - Fn->setCallingConv(llvm::CallingConv::X86_StdCall); - return Fn; -} - void MicrosoftCXXABI::emitRethrow(CodeGenFunction &CGF, bool isNoReturn) { - llvm::Value *Args[] = {llvm::ConstantPointerNull::get(CGM.Int8PtrTy), - llvm::ConstantPointerNull::get(CGM.Int8PtrTy)}; - auto *Fn = getRethrowFn(CGM); + llvm::Value *Args[] = { + llvm::ConstantPointerNull::get(CGM.Int8PtrTy), + llvm::ConstantPointerNull::get(getThrowInfoType()->getPointerTo())}; + auto *Fn = getThrowFn(); if (isNoReturn) CGF.EmitNoreturnRuntimeCallOrInvoke(Fn, Args); else CGF.EmitRuntimeCallOrInvoke(Fn, Args); } -/// \brief Gets the offset to the virtual base that contains the vfptr for -/// MS-ABI polymorphic types. 
-static llvm::Value *getPolymorphicOffset(CodeGenFunction &CGF, - const CXXRecordDecl *RD, - llvm::Value *Value) { - const ASTContext &Context = RD->getASTContext(); - for (const CXXBaseSpecifier &Base : RD->vbases()) - if (Context.getASTRecordLayout(Base.getType()->getAsCXXRecordDecl()) - .hasExtendableVFPtr()) - return CGF.CGM.getCXXABI().GetVirtualBaseClassOffset( - CGF, Value, RD, Base.getType()->getAsCXXRecordDecl()); - llvm_unreachable("One of our vbases should be polymorphic."); +namespace { +struct CallEndCatchMSVC : EHScopeStack::Cleanup { + CallEndCatchMSVC() {} + void Emit(CodeGenFunction &CGF, Flags flags) override { + CGF.EmitNounwindRuntimeCall( + CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_endcatch)); + } +}; } -static std::pair<llvm::Value *, llvm::Value *> -performBaseAdjustment(CodeGenFunction &CGF, llvm::Value *Value, - QualType SrcRecordTy) { +void MicrosoftCXXABI::emitBeginCatch(CodeGenFunction &CGF, + const CXXCatchStmt *S) { + // In the MS ABI, the runtime handles the copy, and the catch handler is + // responsible for destruction. + VarDecl *CatchParam = S->getExceptionDecl(); + llvm::Value *Exn = CGF.getExceptionFromSlot(); + llvm::Function *BeginCatch = + CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_begincatch); + + // If this is a catch-all or the catch parameter is unnamed, we don't need to + // emit an alloca to the object. + if (!CatchParam || !CatchParam->getDeclName()) { + llvm::Value *Args[2] = {Exn, llvm::Constant::getNullValue(CGF.Int8PtrTy)}; + CGF.EmitNounwindRuntimeCall(BeginCatch, Args); + CGF.EHStack.pushCleanup<CallEndCatchMSVC>(NormalCleanup); + return; + } + + CodeGenFunction::AutoVarEmission var = CGF.EmitAutoVarAlloca(*CatchParam); + llvm::Value *ParamAddr = + CGF.Builder.CreateBitCast(var.getObjectAddress(CGF), CGF.Int8PtrTy); + llvm::Value *Args[2] = {Exn, ParamAddr}; + CGF.EmitNounwindRuntimeCall(BeginCatch, Args); + CGF.EHStack.pushCleanup<CallEndCatchMSVC>(NormalCleanup); + CGF.EmitAutoVarCleanups(var); +} + +std::pair<llvm::Value *, llvm::Value *> +MicrosoftCXXABI::performBaseAdjustment(CodeGenFunction &CGF, llvm::Value *Value, + QualType SrcRecordTy) { Value = CGF.Builder.CreateBitCast(Value, CGF.Int8PtrTy); const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl(); + const ASTContext &Context = getContext(); - if (CGF.getContext().getASTRecordLayout(SrcDecl).hasExtendableVFPtr()) + if (Context.getASTRecordLayout(SrcDecl).hasExtendableVFPtr()) return std::make_pair(Value, llvm::ConstantInt::get(CGF.Int32Ty, 0)); // Perform a base adjustment. 
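A minimal case showing why this adjustment exists: when the only vfptr lives in a virtual base, RTTI operations on a derived pointer must first step to that subobject.

    struct B { virtual ~B() {} };    // B carries the only vfptr
    struct D : virtual B { int x; };
    // typeid and dynamic_cast on a D* first add B's vbase offset,
    // which is exactly the (pointer, offset) pair computed here.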
- llvm::Value *Offset = getPolymorphicOffset(CGF, SrcDecl, Value); + const CXXBaseSpecifier *PolymorphicBase = std::find_if( + SrcDecl->vbases_begin(), SrcDecl->vbases_end(), + [&](const CXXBaseSpecifier &Base) { + const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl(); + return Context.getASTRecordLayout(BaseDecl).hasExtendableVFPtr(); + }); + llvm::Value *Offset = GetVirtualBaseClassOffset( + CGF, Value, SrcDecl, PolymorphicBase->getType()->getAsCXXRecordDecl()); Value = CGF.Builder.CreateInBoundsGEP(Value, Offset); Offset = CGF.Builder.CreateTrunc(Offset, CGF.Int32Ty); return std::make_pair(Value, Offset); @@ -725,7 +858,7 @@ bool MicrosoftCXXABI::shouldTypeidBeNullChecked(bool IsDeref, QualType SrcRecordTy) { const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl(); return IsDeref && - !CGM.getContext().getASTRecordLayout(SrcDecl).hasExtendableVFPtr(); + !getContext().getASTRecordLayout(SrcDecl).hasExtendableVFPtr(); } static llvm::CallSite emitRTtypeidCall(CodeGenFunction &CGF, @@ -759,7 +892,7 @@ bool MicrosoftCXXABI::shouldDynamicCastCallBeNullChecked(bool SrcIsPtr, QualType SrcRecordTy) { const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl(); return SrcIsPtr && - !CGM.getContext().getASTRecordLayout(SrcDecl).hasExtendableVFPtr(); + !getContext().getASTRecordLayout(SrcDecl).hasExtendableVFPtr(); } llvm::Value *MicrosoftCXXABI::EmitDynamicCastCall( @@ -817,10 +950,11 @@ bool MicrosoftCXXABI::EmitBadCastCall(CodeGenFunction &CGF) { llvm::Value *MicrosoftCXXABI::GetVirtualBaseClassOffset( CodeGenFunction &CGF, llvm::Value *This, const CXXRecordDecl *ClassDecl, const CXXRecordDecl *BaseClassDecl) { + const ASTContext &Context = getContext(); int64_t VBPtrChars = - getContext().getASTRecordLayout(ClassDecl).getVBPtrOffset().getQuantity(); + Context.getASTRecordLayout(ClassDecl).getVBPtrOffset().getQuantity(); llvm::Value *VBPtrOffset = llvm::ConstantInt::get(CGM.PtrDiffTy, VBPtrChars); - CharUnits IntSize = getContext().getTypeSizeInChars(getContext().IntTy); + CharUnits IntSize = Context.getTypeSizeInChars(Context.IntTy); CharUnits VBTableChars = IntSize * CGM.getMicrosoftVTableContext().getVBTableIndex(ClassDecl, BaseClassDecl); @@ -947,30 +1081,52 @@ void MicrosoftCXXABI::initializeHiddenVirtualInheritanceMembers( } } +static bool hasDefaultCXXMethodCC(ASTContext &Context, + const CXXMethodDecl *MD) { + CallingConv ExpectedCallingConv = Context.getDefaultCallingConvention( + /*IsVariadic=*/false, /*IsCXXMethod=*/true); + CallingConv ActualCallingConv = + MD->getType()->getAs<FunctionProtoType>()->getCallConv(); + return ExpectedCallingConv == ActualCallingConv; +} + void MicrosoftCXXABI::EmitCXXConstructors(const CXXConstructorDecl *D) { // There's only one constructor type in this ABI. CGM.EmitGlobal(GlobalDecl(D, Ctor_Complete)); + + // Exported default constructors either have a simple call-site where they use + // the typical calling convention and have a single 'this' pointer for an + // argument -or- they get a wrapper function which appropriately thunks to the + // real default constructor. This thunk is the default constructor closure. 
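A sketch of a constructor that takes the closure path described in the comment above; a default constructor with no parameters and the default calling convention is exported directly instead:

    struct __declspec(dllexport) A {
      A(int x = 0);   // usable as a default ctor, but its real signature
    };                // takes an int, so an exported closure supplies the 0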
+ if (D->hasAttr<DLLExportAttr>() && D->isDefaultConstructor()) + if (!hasDefaultCXXMethodCC(getContext(), D) || D->getNumParams() != 0) { + llvm::Function *Fn = getAddrOfCXXCtorClosure(D, Ctor_DefaultClosure); + Fn->setLinkage(llvm::GlobalValue::WeakODRLinkage); + Fn->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass); + } } void MicrosoftCXXABI::EmitVBPtrStores(CodeGenFunction &CGF, const CXXRecordDecl *RD) { llvm::Value *ThisInt8Ptr = CGF.Builder.CreateBitCast(getThisValue(CGF), CGM.Int8PtrTy, "this.int8"); - const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD); + const ASTContext &Context = getContext(); + const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); const VBTableGlobals &VBGlobals = enumerateVBTables(RD); for (unsigned I = 0, E = VBGlobals.VBTables->size(); I != E; ++I) { const VPtrInfo *VBT = (*VBGlobals.VBTables)[I]; llvm::GlobalVariable *GV = VBGlobals.Globals[I]; const ASTRecordLayout &SubobjectLayout = - CGM.getContext().getASTRecordLayout(VBT->BaseWithVPtr); + Context.getASTRecordLayout(VBT->BaseWithVPtr); CharUnits Offs = VBT->NonVirtualOffset; Offs += SubobjectLayout.getVBPtrOffset(); if (VBT->getVBaseWithVPtr()) Offs += Layout.getVBaseClassOffset(VBT->getVBaseWithVPtr()); llvm::Value *VBPtr = CGF.Builder.CreateConstInBoundsGEP1_64(ThisInt8Ptr, Offs.getQuantity()); - llvm::Value *GVPtr = CGF.Builder.CreateConstInBoundsGEP2_32(GV, 0, 0); + llvm::Value *GVPtr = + CGF.Builder.CreateConstInBoundsGEP2_32(GV->getValueType(), GV, 0, 0); VBPtr = CGF.Builder.CreateBitCast(VBPtr, GVPtr->getType()->getPointerTo(0), "vbptr." + VBT->ReusingBase->getName()); CGF.Builder.CreateStore(GVPtr, VBPtr); @@ -983,7 +1139,7 @@ MicrosoftCXXABI::buildStructorSignature(const CXXMethodDecl *MD, StructorType T, // TODO: 'for base' flag if (T == StructorType::Deleting) { // The scalar deleting destructor takes an implicit int parameter. - ArgTys.push_back(CGM.getContext().IntTy); + ArgTys.push_back(getContext().IntTy); } auto *CD = dyn_cast<CXXConstructorDecl>(MD); if (!CD) @@ -996,9 +1152,9 @@ MicrosoftCXXABI::buildStructorSignature(const CXXMethodDecl *MD, StructorType T, const FunctionProtoType *FPT = CD->getType()->castAs<FunctionProtoType>(); if (Class->getNumVBases()) { if (FPT->isVariadic()) - ArgTys.insert(ArgTys.begin() + 1, CGM.getContext().IntTy); + ArgTys.insert(ArgTys.begin() + 1, getContext().IntTy); else - ArgTys.push_back(CGM.getContext().IntTy); + ArgTys.push_back(getContext().IntTy); } } @@ -1038,7 +1194,7 @@ MicrosoftCXXABI::getVirtualFunctionPrologueThisAdjustment(GlobalDecl GD) { if (ML.VBase) { const ASTRecordLayout &DerivedLayout = - CGM.getContext().getASTRecordLayout(MD->getParent()); + getContext().getASTRecordLayout(MD->getParent()); Adjustment += DerivedLayout.getVBaseClassOffset(ML.VBase); } @@ -1104,7 +1260,7 @@ llvm::Value *MicrosoftCXXABI::adjustThisArgumentForVirtualFunctionCall( // FIXME: Update the code that emits this adjustment in thunks prologues. 
This = CGF.Builder.CreateConstGEP1_32(This, StaticOffset.getQuantity()); } else { - This = CGF.Builder.CreateConstInBoundsGEP1_32(This, + This = CGF.Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, This, StaticOffset.getQuantity()); } } @@ -1159,8 +1315,8 @@ llvm::Value *MicrosoftCXXABI::adjustThisParameterInVirtualFunctionPrologue( This = CGF.Builder.CreateBitCast(This, charPtrTy); assert(Adjustment.isPositive()); - This = - CGF.Builder.CreateConstInBoundsGEP1_32(This, -Adjustment.getQuantity()); + This = CGF.Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, This, + -Adjustment.getQuantity()); return CGF.Builder.CreateBitCast(This, thisTy); } @@ -1280,7 +1436,7 @@ llvm::Value *MicrosoftCXXABI::getVTableAddressPointInStructor( llvm::GlobalValue *VTableAddressPoint = VFTablesMap[ID]; if (!VTableAddressPoint) { assert(Base.getBase()->getNumVBases() && - !CGM.getContext().getASTRecordLayout(Base.getBase()).hasOwnVFPtr()); + !getContext().getASTRecordLayout(Base.getBase()).hasOwnVFPtr()); } return VTableAddressPoint; } @@ -1336,98 +1492,96 @@ llvm::GlobalVariable *MicrosoftCXXABI::getAddrOfVTable(const CXXRecordDecl *RD, #endif } - for (size_t J = 0, F = VFPtrs.size(); J != F; ++J) { - if (VFPtrs[J]->FullOffsetInMDC != VPtrOffset) - continue; - SmallString<256> VFTableName; - mangleVFTableName(getMangleContext(), RD, VFPtrs[J], VFTableName); - StringRef VTableName = VFTableName; - - uint64_t NumVTableSlots = - VTContext.getVFTableLayout(RD, VFPtrs[J]->FullOffsetInMDC) - .getNumVTableComponents(); - llvm::GlobalValue::LinkageTypes VTableLinkage = - llvm::GlobalValue::ExternalLinkage; - llvm::ArrayType *VTableType = - llvm::ArrayType::get(CGM.Int8PtrTy, NumVTableSlots); - if (getContext().getLangOpts().RTTIData) { - VTableLinkage = llvm::GlobalValue::PrivateLinkage; - VTableName = ""; - } - - VTable = CGM.getModule().getNamedGlobal(VFTableName); - if (!VTable) { - // Create a backing variable for the contents of VTable. The VTable may - // or may not include space for a pointer to RTTI data. - llvm::GlobalValue *VFTable = VTable = new llvm::GlobalVariable( - CGM.getModule(), VTableType, /*isConstant=*/true, VTableLinkage, - /*Initializer=*/nullptr, VTableName); - VTable->setUnnamedAddr(true); - - // Only insert a pointer into the VFTable for RTTI data if we are not - // importing it. We never reference the RTTI data directly so there is no - // need to make room for it. - if (getContext().getLangOpts().RTTIData && - !RD->hasAttr<DLLImportAttr>()) { - llvm::Value *GEPIndices[] = {llvm::ConstantInt::get(CGM.IntTy, 0), - llvm::ConstantInt::get(CGM.IntTy, 1)}; - // Create a GEP which points just after the first entry in the VFTable, - // this should be the location of the first virtual method. - llvm::Constant *VTableGEP = - llvm::ConstantExpr::getInBoundsGetElementPtr(VTable, GEPIndices); - // The symbol for the VFTable is an alias to the GEP. It is - // transparent, to other modules, what the nature of this symbol is; all - // that matters is that the alias be the address of the first virtual - // method. - VFTable = llvm::GlobalAlias::create( - cast<llvm::SequentialType>(VTableGEP->getType())->getElementType(), - /*AddressSpace=*/0, llvm::GlobalValue::ExternalLinkage, - VFTableName.str(), VTableGEP, &CGM.getModule()); - } else { - // We don't need a GlobalAlias to be a symbol for the VTable if we won't - // be referencing any RTTI data. The GlobalVariable will end up being - // an appropriate definition of the VFTable. 
- VTable->setName(VFTableName.str()); - } - - VFTable->setUnnamedAddr(true); - if (RD->hasAttr<DLLImportAttr>()) - VFTable->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass); - else if (RD->hasAttr<DLLExportAttr>()) - VFTable->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass); - - llvm::GlobalValue::LinkageTypes VFTableLinkage = CGM.getVTableLinkage(RD); - if (VFTable != VTable) { - if (llvm::GlobalValue::isAvailableExternallyLinkage(VFTableLinkage)) { - // AvailableExternally implies that we grabbed the data from another - // executable. No need to stick the alias in a Comdat. - } else if (llvm::GlobalValue::isInternalLinkage(VFTableLinkage) || - llvm::GlobalValue::isWeakODRLinkage(VFTableLinkage) || - llvm::GlobalValue::isLinkOnceODRLinkage(VFTableLinkage)) { - // The alias is going to be dropped into a Comdat, no need to make it - // weak. - if (!llvm::GlobalValue::isInternalLinkage(VFTableLinkage)) - VFTableLinkage = llvm::GlobalValue::ExternalLinkage; - llvm::Comdat *C = - CGM.getModule().getOrInsertComdat(VFTable->getName()); - // We must indicate which VFTable is larger to support linking between - // translation units which do and do not have RTTI data. The largest - // VFTable contains the RTTI data; translation units which reference - // the smaller VFTable always reference it relative to the first - // virtual method. - C->setSelectionKind(llvm::Comdat::Largest); - VTable->setComdat(C); - } else { - llvm_unreachable("unexpected linkage for vftable!"); - } - } - VFTable->setLinkage(VFTableLinkage); - CGM.setGlobalVisibility(VFTable, RD); - VFTablesMap[ID] = VFTable; + VPtrInfo *const *VFPtrI = + std::find_if(VFPtrs.begin(), VFPtrs.end(), [&](VPtrInfo *VPI) { + return VPI->FullOffsetInMDC == VPtrOffset; + }); + if (VFPtrI == VFPtrs.end()) { + VFTablesMap[ID] = nullptr; + return nullptr; + } + VPtrInfo *VFPtr = *VFPtrI; + + SmallString<256> VFTableName; + mangleVFTableName(getMangleContext(), RD, VFPtr, VFTableName); + + llvm::GlobalValue::LinkageTypes VFTableLinkage = CGM.getVTableLinkage(RD); + bool VFTableComesFromAnotherTU = + llvm::GlobalValue::isAvailableExternallyLinkage(VFTableLinkage) || + llvm::GlobalValue::isExternalLinkage(VFTableLinkage); + bool VTableAliasIsRequred = + !VFTableComesFromAnotherTU && getContext().getLangOpts().RTTIData; + + if (llvm::GlobalValue *VFTable = + CGM.getModule().getNamedGlobal(VFTableName)) { + VFTablesMap[ID] = VFTable; + return VTableAliasIsRequred + ? cast<llvm::GlobalVariable>( + cast<llvm::GlobalAlias>(VFTable)->getBaseObject()) + : cast<llvm::GlobalVariable>(VFTable); + } + + uint64_t NumVTableSlots = + VTContext.getVFTableLayout(RD, VFPtr->FullOffsetInMDC) + .getNumVTableComponents(); + llvm::GlobalValue::LinkageTypes VTableLinkage = + VTableAliasIsRequred ? llvm::GlobalValue::PrivateLinkage : VFTableLinkage; + + StringRef VTableName = VTableAliasIsRequred ? StringRef() : VFTableName.str(); + + llvm::ArrayType *VTableType = + llvm::ArrayType::get(CGM.Int8PtrTy, NumVTableSlots); + + // Create a backing variable for the contents of VTable. The VTable may + // or may not include space for a pointer to RTTI data. 
+ llvm::GlobalValue *VFTable; + VTable = new llvm::GlobalVariable(CGM.getModule(), VTableType, + /*isConstant=*/true, VTableLinkage, + /*Initializer=*/nullptr, VTableName); + VTable->setUnnamedAddr(true); + + llvm::Comdat *C = nullptr; + if (!VFTableComesFromAnotherTU && + (llvm::GlobalValue::isWeakForLinker(VFTableLinkage) || + (llvm::GlobalValue::isLocalLinkage(VFTableLinkage) && + VTableAliasIsRequred))) + C = CGM.getModule().getOrInsertComdat(VFTableName.str()); + + // Only insert a pointer into the VFTable for RTTI data if we are not + // importing it. We never reference the RTTI data directly so there is no + // need to make room for it. + if (VTableAliasIsRequred) { + llvm::Value *GEPIndices[] = {llvm::ConstantInt::get(CGM.IntTy, 0), + llvm::ConstantInt::get(CGM.IntTy, 1)}; + // Create a GEP which points just after the first entry in the VFTable, + // this should be the location of the first virtual method. + llvm::Constant *VTableGEP = llvm::ConstantExpr::getInBoundsGetElementPtr( + VTable->getValueType(), VTable, GEPIndices); + if (llvm::GlobalValue::isWeakForLinker(VFTableLinkage)) { + VFTableLinkage = llvm::GlobalValue::ExternalLinkage; + if (C) + C->setSelectionKind(llvm::Comdat::Largest); } - break; + VFTable = llvm::GlobalAlias::create( + cast<llvm::PointerType>(VTableGEP->getType()), VFTableLinkage, + VFTableName.str(), VTableGEP, &CGM.getModule()); + VFTable->setUnnamedAddr(true); + } else { + // We don't need a GlobalAlias to be a symbol for the VTable if we won't + // be referencing any RTTI data. + // The GlobalVariable will end up being an appropriate definition of the + // VFTable. + VFTable = VTable; } + if (C) + VTable->setComdat(C); + + if (RD->hasAttr<DLLImportAttr>()) + VFTable->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass); + else if (RD->hasAttr<DLLExportAttr>()) + VFTable->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass); + VFTablesMap[ID] = VFTable; return VTable; } @@ -1464,7 +1618,7 @@ llvm::Value *MicrosoftCXXABI::EmitVirtualDestructorCall( llvm::Type *Ty = CGF.CGM.getTypes().GetFunctionType(*FInfo); llvm::Value *Callee = getVirtualFunctionPointer(CGF, GD, This, Ty); - ASTContext &Context = CGF.getContext(); + ASTContext &Context = getContext(); llvm::Value *ImplicitParam = llvm::ConstantInt::get( llvm::IntegerType::getInt32Ty(CGF.getLLVMContext()), DtorType == Dtor_Deleting); @@ -1530,6 +1684,8 @@ llvm::Function *MicrosoftCXXABI::EmitVirtualMemPtrThunk( ThunkFn->setLinkage(MD->isExternallyVisible() ? 
llvm::GlobalValue::LinkOnceODRLinkage : llvm::GlobalValue::InternalLinkage); + if (MD->isExternallyVisible()) + ThunkFn->setComdat(CGM.getModule().getOrInsertComdat(ThunkFn->getName())); CGM.SetLLVMFunctionAttributes(MD, FnInfo, ThunkFn); CGM.SetLLVMFunctionAttributesForDefinition(MD, ThunkFn); @@ -1576,7 +1732,8 @@ void MicrosoftCXXABI::emitVirtualInheritanceTables(const CXXRecordDecl *RD) { for (unsigned I = 0, E = VBGlobals.VBTables->size(); I != E; ++I) { const VPtrInfo *VBT = (*VBGlobals.VBTables)[I]; llvm::GlobalVariable *GV = VBGlobals.Globals[I]; - emitVBTableDefinition(*VBT, RD, GV); + if (GV->isDeclaration()) + emitVBTableDefinition(*VBT, RD, GV); } } @@ -1603,6 +1760,9 @@ MicrosoftCXXABI::getAddrOfVBTable(const VPtrInfo &VBT, const CXXRecordDecl *RD, else if (RD->hasAttr<DLLExportAttr>()) GV->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass); + if (!GV->hasExternalLinkage()) + emitVBTableDefinition(VBT, RD, GV); + return GV; } @@ -1615,9 +1775,8 @@ void MicrosoftCXXABI::emitVBTableDefinition(const VPtrInfo &VBT, "should only emit vbtables for classes with vbtables"); const ASTRecordLayout &BaseLayout = - CGM.getContext().getASTRecordLayout(VBT.BaseWithVPtr); - const ASTRecordLayout &DerivedLayout = - CGM.getContext().getASTRecordLayout(RD); + getContext().getASTRecordLayout(VBT.BaseWithVPtr); + const ASTRecordLayout &DerivedLayout = getContext().getASTRecordLayout(RD); SmallVector<llvm::Constant *, 4> Offsets(1 + ReusingBase->getNumVBases(), nullptr); @@ -1651,9 +1810,6 @@ void MicrosoftCXXABI::emitVBTableDefinition(const VPtrInfo &VBT, llvm::ArrayType::get(CGM.IntTy, Offsets.size()); llvm::Constant *Init = llvm::ConstantArray::get(VBTableType, Offsets); GV->setInitializer(Init); - - // Set the right visibility. - CGM.setGlobalVisibility(GV, RD); } llvm::Value *MicrosoftCXXABI::performThisAdjustment(CodeGenFunction &CGF, @@ -1709,8 +1865,8 @@ MicrosoftCXXABI::performReturnAdjustment(CodeGenFunction &CGF, llvm::Value *Ret, if (RA.Virtual.Microsoft.VBIndex) { assert(RA.Virtual.Microsoft.VBIndex > 0); - int32_t IntSize = - getContext().getTypeSizeInChars(getContext().IntTy).getQuantity(); + const ASTContext &Context = getContext(); + int32_t IntSize = Context.getTypeSizeInChars(Context.IntTy).getQuantity(); llvm::Value *VBPtr; llvm::Value *VBaseOffset = GetVBaseOffsetFromVBPtr(CGF, V, RA.Virtual.Microsoft.VBPtrOffset, @@ -1719,7 +1875,7 @@ MicrosoftCXXABI::performReturnAdjustment(CodeGenFunction &CGF, llvm::Value *Ret, } if (RA.NonVirtual) - V = CGF.Builder.CreateConstInBoundsGEP1_32(V, RA.NonVirtual); + V = CGF.Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, V, RA.NonVirtual); // Cast back to the original type. return CGF.Builder.CreateBitCast(V, Ret->getType()); @@ -1835,18 +1991,10 @@ void MicrosoftCXXABI::EmitThreadLocalInitFuncs( llvm::Function *F = CXXThreadLocalInits[I]; // If the GV is already in a comdat group, then we have to join it. - llvm::Comdat *C = GV->getComdat(); - - // LinkOnce and Weak linkage are lowered down to a single-member comdat - // group. - // Make an explicit group so we can join it. 
- if (!C && (GV->hasWeakLinkage() || GV->hasLinkOnceLinkage())) { - C = CGM.getModule().getOrInsertComdat(GV->getName()); - GV->setComdat(C); + if (llvm::Comdat *C = GV->getComdat()) AddToXDU(F)->setComdat(C); - } else { + else NonComdatInits.push_back(F); - } } if (!NonComdatInits.empty()) { @@ -1868,6 +2016,81 @@ LValue MicrosoftCXXABI::EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, return LValue(); } +static llvm::GlobalVariable *getInitThreadEpochPtr(CodeGenModule &CGM) { + StringRef VarName("_Init_thread_epoch"); + if (auto *GV = CGM.getModule().getNamedGlobal(VarName)) + return GV; + auto *GV = new llvm::GlobalVariable( + CGM.getModule(), CGM.IntTy, + /*Constant=*/false, llvm::GlobalVariable::ExternalLinkage, + /*Initializer=*/nullptr, VarName, + /*InsertBefore=*/nullptr, llvm::GlobalVariable::GeneralDynamicTLSModel); + GV->setAlignment(CGM.getTarget().getIntAlign() / 8); + return GV; +} + +static llvm::Constant *getInitThreadHeaderFn(CodeGenModule &CGM) { + llvm::FunctionType *FTy = + llvm::FunctionType::get(llvm::Type::getVoidTy(CGM.getLLVMContext()), + CGM.IntTy->getPointerTo(), /*isVarArg=*/false); + return CGM.CreateRuntimeFunction( + FTy, "_Init_thread_header", + llvm::AttributeSet::get(CGM.getLLVMContext(), + llvm::AttributeSet::FunctionIndex, + llvm::Attribute::NoUnwind)); +} + +static llvm::Constant *getInitThreadFooterFn(CodeGenModule &CGM) { + llvm::FunctionType *FTy = + llvm::FunctionType::get(llvm::Type::getVoidTy(CGM.getLLVMContext()), + CGM.IntTy->getPointerTo(), /*isVarArg=*/false); + return CGM.CreateRuntimeFunction( + FTy, "_Init_thread_footer", + llvm::AttributeSet::get(CGM.getLLVMContext(), + llvm::AttributeSet::FunctionIndex, + llvm::Attribute::NoUnwind)); +} + +static llvm::Constant *getInitThreadAbortFn(CodeGenModule &CGM) { + llvm::FunctionType *FTy = + llvm::FunctionType::get(llvm::Type::getVoidTy(CGM.getLLVMContext()), + CGM.IntTy->getPointerTo(), /*isVarArg=*/false); + return CGM.CreateRuntimeFunction( + FTy, "_Init_thread_abort", + llvm::AttributeSet::get(CGM.getLLVMContext(), + llvm::AttributeSet::FunctionIndex, + llvm::Attribute::NoUnwind)); +} + +namespace { +struct ResetGuardBit : EHScopeStack::Cleanup { + llvm::GlobalVariable *Guard; + unsigned GuardNum; + ResetGuardBit(llvm::GlobalVariable *Guard, unsigned GuardNum) + : Guard(Guard), GuardNum(GuardNum) {} + + void Emit(CodeGenFunction &CGF, Flags flags) override { + // Reset the bit in the mask so that the static variable may be + // reinitialized. + CGBuilderTy &Builder = CGF.Builder; + llvm::LoadInst *LI = Builder.CreateLoad(Guard); + llvm::ConstantInt *Mask = + llvm::ConstantInt::get(CGF.IntTy, ~(1U << GuardNum)); + Builder.CreateStore(Builder.CreateAnd(LI, Mask), Guard); + } +}; + +struct CallInitThreadAbort : EHScopeStack::Cleanup { + llvm::GlobalVariable *Guard; + CallInitThreadAbort(llvm::GlobalVariable *Guard) : Guard(Guard) {} + + void Emit(CodeGenFunction &CGF, Flags flags) override { + // Calling _Init_thread_abort will reset the guard's state. + CGF.EmitNounwindRuntimeCall(getInitThreadAbortFn(CGF.CGM), Guard); + } +}; +} + void MicrosoftCXXABI::EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D, llvm::GlobalVariable *GV, bool PerformInit) { @@ -1875,91 +2098,161 @@ void MicrosoftCXXABI::EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D, if (!D.isStaticLocal()) { assert(GV->hasWeakLinkage() || GV->hasLinkOnceLinkage()); // GlobalOpt is allowed to discard the initializer, so use linkonce_odr. 
- CGF.CurFn->setLinkage(llvm::GlobalValue::LinkOnceODRLinkage); + llvm::Function *F = CGF.CurFn; + F->setLinkage(llvm::GlobalValue::LinkOnceODRLinkage); + F->setComdat(CGM.getModule().getOrInsertComdat(F->getName())); CGF.EmitCXXGlobalVarDeclInit(D, GV, PerformInit); return; } - // MSVC always uses an i32 bitfield to guard initialization, which is *not* - // threadsafe. Since the user may be linking in inline functions compiled by - // cl.exe, there's no reason to provide a false sense of security by using - // critical sections here. + bool ThreadlocalStatic = D.getTLSKind(); + bool ThreadsafeStatic = getContext().getLangOpts().ThreadsafeStatics; - if (D.getTLSKind()) - CGM.ErrorUnsupported(&D, "dynamic TLS initialization"); + // Thread-safe static variables which aren't thread-specific have a + // per-variable guard. + bool HasPerVariableGuard = ThreadsafeStatic && !ThreadlocalStatic; CGBuilderTy &Builder = CGF.Builder; llvm::IntegerType *GuardTy = CGF.Int32Ty; llvm::ConstantInt *Zero = llvm::ConstantInt::get(GuardTy, 0); // Get the guard variable for this function if we have one already. - GuardInfo *GI = &GuardVariableMap[D.getDeclContext()]; - - unsigned BitIndex; - if (D.isStaticLocal() && D.isExternallyVisible()) { + GuardInfo *GI = nullptr; + if (ThreadlocalStatic) + GI = &ThreadLocalGuardVariableMap[D.getDeclContext()]; + else if (!ThreadsafeStatic) + GI = &GuardVariableMap[D.getDeclContext()]; + + llvm::GlobalVariable *GuardVar = GI ? GI->Guard : nullptr; + unsigned GuardNum; + if (D.isExternallyVisible()) { // Externally visible variables have to be numbered in Sema to properly // handle unreachable VarDecls. - BitIndex = getContext().getStaticLocalNumber(&D); - assert(BitIndex > 0); - BitIndex--; + GuardNum = getContext().getStaticLocalNumber(&D); + assert(GuardNum > 0); + GuardNum--; + } else if (HasPerVariableGuard) { + GuardNum = ThreadSafeGuardNumMap[D.getDeclContext()]++; } else { // Non-externally visible variables are numbered here in CodeGen. - BitIndex = GI->BitIndex++; + GuardNum = GI->BitIndex++; } - if (BitIndex >= 32) { + if (!HasPerVariableGuard && GuardNum >= 32) { if (D.isExternallyVisible()) ErrorUnsupportedABI(CGF, "more than 32 guarded initializations"); - BitIndex %= 32; - GI->Guard = nullptr; + GuardNum %= 32; + GuardVar = nullptr; } - // Lazily create the i32 bitfield for this function. - if (!GI->Guard) { + if (!GuardVar) { // Mangle the name for the guard. SmallString<256> GuardName; { llvm::raw_svector_ostream Out(GuardName); - getMangleContext().mangleStaticGuardVariable(&D, Out); + if (HasPerVariableGuard) + getMangleContext().mangleThreadSafeStaticGuardVariable(&D, GuardNum, + Out); + else + getMangleContext().mangleStaticGuardVariable(&D, Out); Out.flush(); } // Create the guard variable with a zero-initializer. Just absorb linkage, // visibility and dll storage class from the guarded variable. - GI->Guard = - new llvm::GlobalVariable(CGM.getModule(), GuardTy, false, + GuardVar = + new llvm::GlobalVariable(CGM.getModule(), GuardTy, /*isConstant=*/false, GV->getLinkage(), Zero, GuardName.str()); - GI->Guard->setVisibility(GV->getVisibility()); - GI->Guard->setDLLStorageClass(GV->getDLLStorageClass()); - } else { - assert(GI->Guard->getLinkage() == GV->getLinkage() && - "static local from the same function had different linkage"); - } - - // Pseudo code for the test: - // if (!(GuardVar & MyGuardBit)) { - // GuardVar |= MyGuardBit; - // ... initialize the object ...; - // } - - // Test our bit from the guard variable. 
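A source-level picture of the bitfield scheme in the code being removed and re-emitted here, which also explains the 32-initializations limit diagnosed above; the names are illustrative:

    static int __guard;           // one zero-initialized i32 guard per function
    void f() {
      const int Bit = 1 << 0;     // this static's bit, numbered by Sema/CodeGen
      if (!(__guard & Bit)) {
        __guard |= Bit;           // set *before* running the initializer
        /* run the initializer; the ResetGuardBit cleanup clears the bit
           again if it throws, so initialization can be retried */
      }
    }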
- llvm::ConstantInt *Bit = llvm::ConstantInt::get(GuardTy, 1U << BitIndex);
- llvm::LoadInst *LI = Builder.CreateLoad(GI->Guard);
- llvm::Value *IsInitialized =
- Builder.CreateICmpNE(Builder.CreateAnd(LI, Bit), Zero);
- llvm::BasicBlock *InitBlock = CGF.createBasicBlock("init");
- llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end");
- Builder.CreateCondBr(IsInitialized, EndBlock, InitBlock);
+ GuardVar->setVisibility(GV->getVisibility());
+ GuardVar->setDLLStorageClass(GV->getDLLStorageClass());
+ if (GuardVar->isWeakForLinker())
+ GuardVar->setComdat(
+ CGM.getModule().getOrInsertComdat(GuardVar->getName()));
+ if (D.getTLSKind())
+ GuardVar->setThreadLocal(true);
+ if (GI && !HasPerVariableGuard)
+ GI->Guard = GuardVar;
+ }
+
+ assert(GuardVar->getLinkage() == GV->getLinkage() &&
+ "static local from the same function had different linkage");
+
+ if (!HasPerVariableGuard) {
+ // Pseudo code for the test:
+ // if (!(GuardVar & MyGuardBit)) {
+ // GuardVar |= MyGuardBit;
+ // ... initialize the object ...;
+ // }
+
+ // Test our bit from the guard variable.
+ llvm::ConstantInt *Bit = llvm::ConstantInt::get(GuardTy, 1U << GuardNum);
+ llvm::LoadInst *LI = Builder.CreateLoad(GuardVar);
+ llvm::Value *IsInitialized =
+ Builder.CreateICmpNE(Builder.CreateAnd(LI, Bit), Zero);
+ llvm::BasicBlock *InitBlock = CGF.createBasicBlock("init");
+ llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end");
+ Builder.CreateCondBr(IsInitialized, EndBlock, InitBlock);
+
+ // Set our bit in the guard variable and emit the initializer and add a global
+ // destructor if appropriate.
+ CGF.EmitBlock(InitBlock);
+ Builder.CreateStore(Builder.CreateOr(LI, Bit), GuardVar);
+ CGF.EHStack.pushCleanup<ResetGuardBit>(EHCleanup, GuardVar, GuardNum);
+ CGF.EmitCXXGlobalVarDeclInit(D, GV, PerformInit);
+ CGF.PopCleanupBlock();
+ Builder.CreateBr(EndBlock);
- // Set our bit in the guard variable and emit the initializer and add a global
- // destructor if appropriate.
- CGF.EmitBlock(InitBlock);
- Builder.CreateStore(Builder.CreateOr(LI, Bit), GI->Guard);
- CGF.EmitCXXGlobalVarDeclInit(D, GV, PerformInit);
- Builder.CreateBr(EndBlock);
+ // Continue.
+ CGF.EmitBlock(EndBlock);
+ } else {
+ // Pseudo code for the test:
+ // if (TSS > _Init_thread_epoch) {
+ // _Init_thread_header(&TSS);
+ // if (TSS == -1) {
+ // ... initialize the object ...;
+ // _Init_thread_footer(&TSS);
+ // }
+ // }
+ //
+ // The algorithm is almost identical to what can be found in the appendix
+ // of N2325.
+
+ unsigned IntAlign = CGM.getTarget().getIntAlign() / 8;
+
+ // This BasicBlock determines whether or not we have any work to do.
+ llvm::LoadInst *FirstGuardLoad =
+ Builder.CreateAlignedLoad(GuardVar, IntAlign);
+ FirstGuardLoad->setOrdering(llvm::AtomicOrdering::Unordered);
+ llvm::LoadInst *InitThreadEpoch =
+ Builder.CreateLoad(getInitThreadEpochPtr(CGM));
+ llvm::Value *IsUninitialized =
+ Builder.CreateICmpSGT(FirstGuardLoad, InitThreadEpoch);
+ llvm::BasicBlock *AttemptInitBlock = CGF.createBasicBlock("init.attempt");
+ llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end");
+ Builder.CreateCondBr(IsUninitialized, AttemptInitBlock, EndBlock);
+
+ // This BasicBlock attempts to determine whether or not this thread is
+ // responsible for doing the initialization.
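The protocol from the pseudocode above, as a compilable sketch against the MSVC CRT interface fetched by the getInitThread*Fn helpers; `__tss_guard` and `lazy_init` are illustrative names:

    extern "C" {
    extern thread_local int _Init_thread_epoch; // per-thread, provided by the CRT
    void _Init_thread_header(int *guard);
    void _Init_thread_footer(int *guard);
    void _Init_thread_abort(int *guard);
    }

    static int __tss_guard;                    // the per-variable guard created above
    void lazy_init() {
      if (__tss_guard > _Init_thread_epoch) {  // unordered load; fast path when done
        _Init_thread_header(&__tss_guard);     // blocks, or elects us the initializer
        if (__tss_guard == -1) {               // -1 means this thread won the race
          /* construct the object; _Init_thread_abort resets the guard on throw */
          _Init_thread_footer(&__tss_guard);   // publish completion
        }
      }
    }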
+ CGF.EmitBlock(AttemptInitBlock); + CGF.EmitNounwindRuntimeCall(getInitThreadHeaderFn(CGM), GuardVar); + llvm::LoadInst *SecondGuardLoad = + Builder.CreateAlignedLoad(GuardVar, IntAlign); + SecondGuardLoad->setOrdering(llvm::AtomicOrdering::Unordered); + llvm::Value *ShouldDoInit = + Builder.CreateICmpEQ(SecondGuardLoad, getAllOnesInt()); + llvm::BasicBlock *InitBlock = CGF.createBasicBlock("init"); + Builder.CreateCondBr(ShouldDoInit, InitBlock, EndBlock); + + // Ok, we ended up getting selected as the initializing thread. + CGF.EmitBlock(InitBlock); + CGF.EHStack.pushCleanup<CallInitThreadAbort>(EHCleanup, GuardVar); + CGF.EmitCXXGlobalVarDeclInit(D, GV, PerformInit); + CGF.PopCleanupBlock(); + CGF.EmitNounwindRuntimeCall(getInitThreadFooterFn(CGM), GuardVar); + Builder.CreateBr(EndBlock); - // Continue. - CGF.EmitBlock(EndBlock); + CGF.EmitBlock(EndBlock); + } } bool MicrosoftCXXABI::isZeroInitializable(const MemberPointerType *MPT) { @@ -2040,8 +2333,8 @@ llvm::Constant * MicrosoftCXXABI::EmitFullMemberPointer(llvm::Constant *FirstField, bool IsMemberFunction, const CXXRecordDecl *RD, - CharUnits NonVirtualBaseAdjustment) -{ + CharUnits NonVirtualBaseAdjustment, + unsigned VBTableIndex) { MSInheritanceAttr::Spelling Inheritance = RD->getMSInheritanceModel(); // Single inheritance class member pointer are represented as scalars instead @@ -2065,7 +2358,7 @@ MicrosoftCXXABI::EmitFullMemberPointer(llvm::Constant *FirstField, // The rest of the fields are adjusted by conversions to a more derived class. if (MSInheritanceAttr::hasVBTableOffsetField(Inheritance)) - fields.push_back(getZeroInt()); + fields.push_back(llvm::ConstantInt::get(CGM.IntTy, VBTableIndex)); return llvm::ConstantStruct::getAnon(fields); } @@ -2077,7 +2370,7 @@ MicrosoftCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT, llvm::Constant *FirstField = llvm::ConstantInt::get(CGM.IntTy, offset.getQuantity()); return EmitFullMemberPointer(FirstField, /*IsMemberFunction=*/false, RD, - CharUnits::Zero()); + CharUnits::Zero(), /*VBTableIndex=*/0); } llvm::Constant *MicrosoftCXXABI::EmitMemberPointer(const CXXMethodDecl *MD) { @@ -2113,6 +2406,7 @@ MicrosoftCXXABI::BuildMemberPointer(const CXXRecordDecl *RD, RD = RD->getMostRecentDecl(); CodeGenTypes &Types = CGM.getTypes(); + unsigned VBTableIndex = 0; llvm::Constant *FirstField; const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>(); if (!MD->isVirtual()) { @@ -2129,8 +2423,6 @@ MicrosoftCXXABI::BuildMemberPointer(const CXXRecordDecl *RD, FirstField = CGM.GetAddrOfFunction(MD, Ty); FirstField = llvm::ConstantExpr::getBitCast(FirstField, CGM.VoidPtrTy); } else { - MicrosoftVTableContext::MethodVFTableLocation ML = - CGM.getMicrosoftVTableContext().getMethodVFTableLocation(MD); if (!CGM.getTypes().isFuncTypeConvertible( MD->getType()->castAs<FunctionType>())) { CGM.ErrorUnsupported(MD, "pointer to virtual member function with " @@ -2139,21 +2431,22 @@ MicrosoftCXXABI::BuildMemberPointer(const CXXRecordDecl *RD, } else if (FPT->getCallConv() == CC_X86FastCall) { CGM.ErrorUnsupported(MD, "pointer to fastcall virtual member function"); FirstField = llvm::Constant::getNullValue(CGM.VoidPtrTy); - } else if (ML.VBase) { - CGM.ErrorUnsupported(MD, "pointer to virtual member function overriding " - "member function in virtual base class"); - FirstField = llvm::Constant::getNullValue(CGM.VoidPtrTy); } else { + auto &VTableContext = CGM.getMicrosoftVTableContext(); + MicrosoftVTableContext::MethodVFTableLocation ML = + 
VTableContext.getMethodVFTableLocation(MD); llvm::Function *Thunk = EmitVirtualMemPtrThunk(MD, ML); FirstField = llvm::ConstantExpr::getBitCast(Thunk, CGM.VoidPtrTy); // Include the vfptr adjustment if the method is in a non-primary vftable. NonVirtualBaseAdjustment += ML.VFPtrOffset; + if (ML.VBase) + VBTableIndex = VTableContext.getVBTableIndex(RD, ML.VBase) * 4; } } // The rest of the fields are common with data member pointers. return EmitFullMemberPointer(FirstField, /*IsMemberFunction=*/true, RD, - NonVirtualBaseAdjustment); + NonVirtualBaseAdjustment, VBTableIndex); } /// Member pointers are the same if they're either bitwise identical *or* both @@ -2867,7 +3160,11 @@ llvm::GlobalVariable *MSRTTIBuilder::getClassHierarchyDescriptor() { auto Type = ABI.getClassHierarchyDescriptorType(); auto CHD = new llvm::GlobalVariable(Module, Type, /*Constant=*/true, Linkage, /*Initializer=*/nullptr, - MangledName.c_str()); + StringRef(MangledName)); + if (CHD->isWeakForLinker()) + CHD->setComdat(CGM.getModule().getOrInsertComdat(CHD->getName())); + + auto *Bases = getBaseClassArray(Classes); // Initialize the base class ClassHierarchyDescriptor. llvm::Constant *Fields[] = { @@ -2875,7 +3172,7 @@ llvm::GlobalVariable *MSRTTIBuilder::getClassHierarchyDescriptor() { llvm::ConstantInt::get(CGM.IntTy, Flags), llvm::ConstantInt::get(CGM.IntTy, Classes.size()), ABI.getImageRelativeConstant(llvm::ConstantExpr::getInBoundsGetElementPtr( - getBaseClassArray(Classes), + Bases->getValueType(), Bases, llvm::ArrayRef<llvm::Value *>(GEPIndices))), }; CHD->setInitializer(llvm::ConstantStruct::get(Type, Fields)); @@ -2898,9 +3195,12 @@ MSRTTIBuilder::getBaseClassArray(SmallVectorImpl<MSRTTIClass> &Classes) { llvm::Type *PtrType = ABI.getImageRelativeType( ABI.getBaseClassDescriptorType()->getPointerTo()); auto *ArrType = llvm::ArrayType::get(PtrType, Classes.size() + 1); - auto *BCA = new llvm::GlobalVariable( - Module, ArrType, - /*Constant=*/true, Linkage, /*Initializer=*/nullptr, MangledName.c_str()); + auto *BCA = + new llvm::GlobalVariable(Module, ArrType, + /*Constant=*/true, Linkage, + /*Initializer=*/nullptr, StringRef(MangledName)); + if (BCA->isWeakForLinker()) + BCA->setComdat(CGM.getModule().getOrInsertComdat(BCA->getName())); // Initialize the BaseClassArray. SmallVector<llvm::Constant *, 8> BaseClassArrayData; @@ -2938,9 +3238,11 @@ MSRTTIBuilder::getBaseClassDescriptor(const MSRTTIClass &Class) { // Forward-declare the base class descriptor. auto Type = ABI.getBaseClassDescriptorType(); - auto BCD = new llvm::GlobalVariable(Module, Type, /*Constant=*/true, Linkage, - /*Initializer=*/nullptr, - MangledName.c_str()); + auto BCD = + new llvm::GlobalVariable(Module, Type, /*Constant=*/true, Linkage, + /*Initializer=*/nullptr, StringRef(MangledName)); + if (BCD->isWeakForLinker()) + BCD->setComdat(CGM.getModule().getOrInsertComdat(BCD->getName())); // Initialize the BaseClassDescriptor. llvm::Constant *Fields[] = { @@ -2984,7 +3286,7 @@ MSRTTIBuilder::getCompleteObjectLocator(const VPtrInfo *Info) { // Forward-declare the complete object locator. llvm::StructType *Type = ABI.getCompleteObjectLocatorType(); auto COL = new llvm::GlobalVariable(Module, Type, /*Constant=*/true, Linkage, - /*Initializer=*/nullptr, MangledName.c_str()); + /*Initializer=*/nullptr, StringRef(MangledName)); // Initialize the CompleteObjectLocator. 
llvm::Constant *Fields[] = { @@ -3000,15 +3302,91 @@ MSRTTIBuilder::getCompleteObjectLocator(const VPtrInfo *Info) {
if (!ABI.isImageRelative())
FieldsRef = FieldsRef.drop_back();
COL->setInitializer(llvm::ConstantStruct::get(Type, FieldsRef));
+ if (COL->isWeakForLinker())
+ COL->setComdat(CGM.getModule().getOrInsertComdat(COL->getName()));
return COL;
}
+static QualType decomposeTypeForEH(ASTContext &Context, QualType T,
+ bool &IsConst, bool &IsVolatile) {
+ T = Context.getExceptionObjectType(T);
+
+ // C++14 [except.handle]p3:
+ // A handler is a match for an exception object of type E if [...]
+ // - the handler is of type cv T or const T& where T is a pointer type and
+ // E is a pointer type that can be converted to T by [...]
+ // - a qualification conversion
+ IsConst = false;
+ IsVolatile = false;
+ QualType PointeeType = T->getPointeeType();
+ if (!PointeeType.isNull()) {
+ IsConst = PointeeType.isConstQualified();
+ IsVolatile = PointeeType.isVolatileQualified();
+ }
+
+ // Member pointer types like "const int A::*" are represented by having RTTI
+ // for "int A::*" and separately storing the const qualifier.
+ if (const auto *MPTy = T->getAs<MemberPointerType>())
+ T = Context.getMemberPointerType(PointeeType.getUnqualifiedType(),
+ MPTy->getClass());
+
+ // Pointer types like "const int * const *" are represented by having RTTI
+ // for "const int **" and separately storing the const qualifier.
+ if (T->isPointerType())
+ T = Context.getPointerType(PointeeType.getUnqualifiedType());
+
+ return T;
+}
+
+llvm::Constant *
+MicrosoftCXXABI::getAddrOfCXXCatchHandlerType(QualType Type,
+ QualType CatchHandlerType) {
+ // TypeDescriptors for exceptions never have qualified pointer types;
+ // qualifiers are stored separately in order to support qualification
+ // conversions.
+ bool IsConst, IsVolatile;
+ Type = decomposeTypeForEH(getContext(), Type, IsConst, IsVolatile);
+
+ bool IsReference = CatchHandlerType->isReferenceType();
+
+ uint32_t Flags = 0;
+ if (IsConst)
+ Flags |= 1;
+ if (IsVolatile)
+ Flags |= 2;
+ if (IsReference)
+ Flags |= 8;
+
+ SmallString<256> MangledName;
+ {
+ llvm::raw_svector_ostream Out(MangledName);
+ getMangleContext().mangleCXXCatchHandlerType(Type, Flags, Out);
+ }
+
+ if (llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(MangledName))
+ return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
+
+ llvm::Constant *Fields[] = {
+ llvm::ConstantInt::get(CGM.IntTy, Flags), // Flags
+ getAddrOfRTTIDescriptor(Type), // TypeDescriptor
+ };
+ llvm::StructType *CatchHandlerTypeType = getCatchHandlerTypeType();
+ auto *Var = new llvm::GlobalVariable(
+ CGM.getModule(), CatchHandlerTypeType, /*Constant=*/true,
+ llvm::GlobalValue::PrivateLinkage,
+ llvm::ConstantStruct::get(CatchHandlerTypeType, Fields),
+ StringRef(MangledName));
+ Var->setUnnamedAddr(true);
+ Var->setSection("llvm.metadata");
+ return Var;
+}
+
/// \brief Gets a TypeDescriptor. Returns a llvm::Constant * rather than a
/// llvm::GlobalVariable * because different type descriptors have different
/// types, and need to be abstracted. They are abstracted by casting the
/// address to an Int8PtrTy.
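The Flags word built in getAddrOfCXXCatchHandlerType above, spelled out; the bit values come from the code, while the enumerator names are descriptive additions:

    enum CatchHandlerTypeFlags {
      CHT_IsConst     = 1, // handler's pointee is const-qualified
      CHT_IsVolatile  = 2, // handler's pointee is volatile-qualified
      CHT_IsReference = 8, // handler is declared as a reference
    };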
llvm::Constant *MicrosoftCXXABI::getAddrOfRTTIDescriptor(QualType Type) {
- SmallString<256> MangledName, TypeInfoString;
+ SmallString<256> MangledName;
{
llvm::raw_svector_ostream Out(MangledName);
getMangleContext().mangleCXXRTTI(Type, Out);
@@ -3019,6 +3397,7 @@ llvm::Constant *MicrosoftCXXABI::getAddrOfRTTIDescriptor(QualType Type) {
return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
// Compute the fields for the TypeDescriptor.
+ SmallString<256> TypeInfoString;
{
llvm::raw_svector_ostream Out(TypeInfoString);
getMangleContext().mangleCXXRTTIName(Type, Out);
@@ -3031,13 +3410,14 @@ llvm::Constant *MicrosoftCXXABI::getAddrOfRTTIDescriptor(QualType Type) {
llvm::ConstantDataArray::getString(CGM.getLLVMContext(), TypeInfoString)};
llvm::StructType *TypeDescriptorType =
getTypeDescriptorType(TypeInfoString);
- return llvm::ConstantExpr::getBitCast(
- new llvm::GlobalVariable(
- CGM.getModule(), TypeDescriptorType, /*Constant=*/false,
- getLinkageForRTTI(Type),
- llvm::ConstantStruct::get(TypeDescriptorType, Fields),
- MangledName.c_str()),
- CGM.Int8PtrTy);
+ auto *Var = new llvm::GlobalVariable(
+ CGM.getModule(), TypeDescriptorType, /*Constant=*/false,
+ getLinkageForRTTI(Type),
+ llvm::ConstantStruct::get(TypeDescriptorType, Fields),
+ StringRef(MangledName));
+ if (Var->isWeakForLinker())
+ Var->setComdat(CGM.getModule().getOrInsertComdat(Var->getName()));
+ return llvm::ConstantExpr::getBitCast(Var, CGM.Int8PtrTy);
}
/// \brief Gets or creates a Microsoft CompleteObjectLocator.
@@ -3051,7 +3431,8 @@ static void emitCXXConstructor(CodeGenModule &CGM,
const CXXConstructorDecl *ctor,
StructorType ctorType) {
// There are no constructor variants; always emit the complete constructor.
- CGM.codegenCXXStructor(ctor, StructorType::Complete);
+ llvm::Function *Fn = CGM.codegenCXXStructor(ctor, StructorType::Complete);
+ CGM.maybeSetTrivialComdat(*ctor, *Fn);
}
static void emitCXXDestructor(CodeGenModule &CGM, const CXXDestructorDecl *dtor,
@@ -3077,7 +3458,9 @@ static void emitCXXDestructor(CodeGenModule &CGM, const CXXDestructorDecl *dtor,
if (dtorType == StructorType::Base && !CGM.TryEmitBaseDestructorAsAlias(dtor))
return;
- CGM.codegenCXXStructor(dtor, dtorType);
+ llvm::Function *Fn = CGM.codegenCXXStructor(dtor, dtorType);
+ if (Fn->isWeakForLinker())
+ Fn->setComdat(CGM.getModule().getOrInsertComdat(Fn->getName()));
}
void MicrosoftCXXABI::emitCXXStructor(const CXXMethodDecl *MD,
@@ -3088,3 +3471,400 @@ void MicrosoftCXXABI::emitCXXStructor(const CXXMethodDecl *MD,
}
emitCXXDestructor(CGM, cast<CXXDestructorDecl>(MD), Type);
}
+
+llvm::Function *
+MicrosoftCXXABI::getAddrOfCXXCtorClosure(const CXXConstructorDecl *CD,
+ CXXCtorType CT) {
+ assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);
+
+ // Calculate the mangled name.
+ SmallString<256> ThunkName;
+ llvm::raw_svector_ostream Out(ThunkName);
+ getMangleContext().mangleCXXCtor(CD, CT, Out);
+ Out.flush();
+
+ // If the thunk has been generated previously, just return it.
+ if (llvm::GlobalValue *GV = CGM.getModule().getNamedValue(ThunkName))
+ return cast<llvm::Function>(GV);
+
+ // Create the llvm::Function.
+ const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeMSCtorClosure(CD, CT);
+ llvm::FunctionType *ThunkTy = CGM.getTypes().GetFunctionType(FnInfo);
+ const CXXRecordDecl *RD = CD->getParent();
+ QualType RecordTy = getContext().getRecordType(RD);
+ llvm::Function *ThunkFn = llvm::Function::Create(
+ ThunkTy, getLinkageForRTTI(RecordTy), ThunkName.str(), &CGM.getModule());
+ ThunkFn->setCallingConv(static_cast<llvm::CallingConv::ID>(
+ FnInfo.getEffectiveCallingConvention()));
+ bool IsCopy = CT == Ctor_CopyingClosure;
+
+ // Start codegen.
+ CodeGenFunction CGF(CGM);
+ CGF.CurGD = GlobalDecl(CD, Ctor_Complete);
+
+ // Build FunctionArgs.
+ FunctionArgList FunctionArgs;
+
+ // A constructor always starts with a 'this' pointer as its first argument.
+ buildThisParam(CGF, FunctionArgs);
+
+ // Following the 'this' pointer is a reference to the source object that we
+ // are copying from.
+ ImplicitParamDecl SrcParam(
+ getContext(), nullptr, SourceLocation(), &getContext().Idents.get("src"),
+ getContext().getLValueReferenceType(RecordTy,
+ /*SpelledAsLValue=*/true));
+ if (IsCopy)
+ FunctionArgs.push_back(&SrcParam);
+
+ // Constructors for classes which utilize virtual bases have an additional
+ // parameter which indicates whether or not it is being delegated to by a more
+ // derived constructor.
+ ImplicitParamDecl IsMostDerived(getContext(), nullptr, SourceLocation(),
+ &getContext().Idents.get("is_most_derived"),
+ getContext().IntTy);
+ // Only add the parameter to the list if the class has virtual bases.
+ if (RD->getNumVBases() > 0)
+ FunctionArgs.push_back(&IsMostDerived);
+
+ // Start defining the function.
+ CGF.StartFunction(GlobalDecl(), FnInfo.getReturnType(), ThunkFn, FnInfo,
+ FunctionArgs, CD->getLocation(), SourceLocation());
+ EmitThisParam(CGF);
+ llvm::Value *This = getThisValue(CGF);
+
+ llvm::Value *SrcVal =
+ IsCopy ? CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&SrcParam), "src")
+ : nullptr;
+
+ CallArgList Args;
+
+ // Push the this ptr.
+ Args.add(RValue::get(This), CD->getThisType(getContext()));
+
+ // Push the src ptr.
+ if (SrcVal)
+ Args.add(RValue::get(SrcVal), SrcParam.getType());
+
+ // Add the rest of the default arguments.
+ std::vector<Stmt *> ArgVec;
+ for (unsigned I = IsCopy ? 1 : 0, E = CD->getNumParams(); I != E; ++I) {
+ Stmt *DefaultArg = getContext().getDefaultArgExprForConstructor(CD, I);
+ assert(DefaultArg && "sema forgot to instantiate default args");
+ ArgVec.push_back(DefaultArg);
+ }
+
+ CodeGenFunction::RunCleanupsScope Cleanups(CGF);
+
+ const auto *FPT = CD->getType()->castAs<FunctionProtoType>();
+ ConstExprIterator ArgBegin(ArgVec.data()),
+ ArgEnd(ArgVec.data() + ArgVec.size());
+ CGF.EmitCallArgs(Args, FPT, ArgBegin, ArgEnd, CD, IsCopy ? 1 : 0);
+
+ // Insert any ABI-specific implicit constructor arguments.
+ unsigned ExtraArgs = addImplicitConstructorArgs(CGF, CD, Ctor_Complete,
+ /*ForVirtualBase=*/false,
+ /*Delegating=*/false, Args);
+
+ // Call the constructor with our arguments.
+ llvm::Value *CalleeFn = CGM.getAddrOfCXXStructor(CD, StructorType::Complete);
+ const CGFunctionInfo &CalleeInfo = CGM.getTypes().arrangeCXXConstructorCall(
+ Args, CD, Ctor_Complete, ExtraArgs);
+ CGF.EmitCall(CalleeInfo, CalleeFn, ReturnValueSlot(), Args, CD);
+
+ Cleanups.ForceCleanup();
+
+ // Emit the ret instruction and remove any temporary instructions created for the
+ // aid of CodeGen.
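The same machinery also backs the copying closures selected in getCatchableType below: when the copy constructor the EH runtime must invoke has extra defaulted parameters or a non-default calling convention, a closure with the plain two-pointer shape wraps it. A case that would trigger it, for illustration:

    struct S {
      S(const S &, int tag = 0);  // the runtime can only pass this and src,
    };                            // so an emitted closure supplies the trailing 0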
+ CGF.FinishFunction(SourceLocation()); + + return ThunkFn; +} + +llvm::Constant *MicrosoftCXXABI::getCatchableType(QualType T, + uint32_t NVOffset, + int32_t VBPtrOffset, + uint32_t VBIndex) { + assert(!T->isReferenceType()); + + CXXRecordDecl *RD = T->getAsCXXRecordDecl(); + const CXXConstructorDecl *CD = + RD ? CGM.getContext().getCopyConstructorForExceptionObject(RD) : nullptr; + CXXCtorType CT = Ctor_Complete; + if (CD) + if (!hasDefaultCXXMethodCC(getContext(), CD) || CD->getNumParams() != 1) + CT = Ctor_CopyingClosure; + + uint32_t Size = getContext().getTypeSizeInChars(T).getQuantity(); + SmallString<256> MangledName; + { + llvm::raw_svector_ostream Out(MangledName); + getMangleContext().mangleCXXCatchableType(T, CD, CT, Size, NVOffset, + VBPtrOffset, VBIndex, Out); + } + if (llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(MangledName)) + return getImageRelativeConstant(GV); + + // The TypeDescriptor is used by the runtime to determine if a catch handler + // is appropriate for the exception object. + llvm::Constant *TD = getImageRelativeConstant(getAddrOfRTTIDescriptor(T)); + + // The runtime is responsible for calling the copy constructor if the + // exception is caught by value. + llvm::Constant *CopyCtor; + if (CD) { + if (CT == Ctor_CopyingClosure) + CopyCtor = getAddrOfCXXCtorClosure(CD, Ctor_CopyingClosure); + else + CopyCtor = CGM.getAddrOfCXXStructor(CD, StructorType::Complete); + + CopyCtor = llvm::ConstantExpr::getBitCast(CopyCtor, CGM.Int8PtrTy); + } else { + CopyCtor = llvm::Constant::getNullValue(CGM.Int8PtrTy); + } + CopyCtor = getImageRelativeConstant(CopyCtor); + + bool IsScalar = !RD; + bool HasVirtualBases = false; + bool IsStdBadAlloc = false; // std::bad_alloc is special for some reason. + QualType PointeeType = T; + if (T->isPointerType()) + PointeeType = T->getPointeeType(); + if (const CXXRecordDecl *RD = PointeeType->getAsCXXRecordDecl()) { + HasVirtualBases = RD->getNumVBases() > 0; + if (IdentifierInfo *II = RD->getIdentifier()) + IsStdBadAlloc = II->isStr("bad_alloc") && RD->isInStdNamespace(); + } + + // Encode the relevant CatchableType properties into the Flags bitfield. + // FIXME: Figure out how bits 2 or 8 can get set. + uint32_t Flags = 0; + if (IsScalar) + Flags |= 1; + if (HasVirtualBases) + Flags |= 4; + if (IsStdBadAlloc) + Flags |= 16; + + llvm::Constant *Fields[] = { + llvm::ConstantInt::get(CGM.IntTy, Flags), // Flags + TD, // TypeDescriptor + llvm::ConstantInt::get(CGM.IntTy, NVOffset), // NonVirtualAdjustment + llvm::ConstantInt::get(CGM.IntTy, VBPtrOffset), // OffsetToVBPtr + llvm::ConstantInt::get(CGM.IntTy, VBIndex), // VBTableIndex + llvm::ConstantInt::get(CGM.IntTy, Size), // Size + CopyCtor // CopyCtor + }; + llvm::StructType *CTType = getCatchableTypeType(); + auto *GV = new llvm::GlobalVariable( + CGM.getModule(), CTType, /*Constant=*/true, getLinkageForRTTI(T), + llvm::ConstantStruct::get(CTType, Fields), StringRef(MangledName)); + GV->setUnnamedAddr(true); + GV->setSection(".xdata"); + if (GV->isWeakForLinker()) + GV->setComdat(CGM.getModule().getOrInsertComdat(GV->getName())); + return getImageRelativeConstant(GV); +} + +llvm::GlobalVariable *MicrosoftCXXABI::getCatchableTypeArray(QualType T) { + assert(!T->isReferenceType()); + + // See if we've already generated a CatchableTypeArray for this type before. + llvm::GlobalVariable *&CTA = CatchableTypeArrays[T]; + if (CTA) + return CTA; + + // Ensure that we don't have duplicate entries in our CatchableTypeArray by + // using a SmallSetVector. 
Duplicates may arise due to virtual bases + // occurring more than once in the hierarchy. + llvm::SmallSetVector<llvm::Constant *, 2> CatchableTypes; + + // C++14 [except.handle]p3: + // A handler is a match for an exception object of type E if [...] + // - the handler is of type cv T or cv T& and T is an unambiguous public + // base class of E, or + // - the handler is of type cv T or const T& where T is a pointer type and + // E is a pointer type that can be converted to T by [...] + // - a standard pointer conversion (4.10) not involving conversions to + // pointers to private or protected or ambiguous classes + const CXXRecordDecl *MostDerivedClass = nullptr; + bool IsPointer = T->isPointerType(); + if (IsPointer) + MostDerivedClass = T->getPointeeType()->getAsCXXRecordDecl(); + else + MostDerivedClass = T->getAsCXXRecordDecl(); + + // Collect all the unambiguous public bases of the MostDerivedClass. + if (MostDerivedClass) { + const ASTContext &Context = getContext(); + const ASTRecordLayout &MostDerivedLayout = + Context.getASTRecordLayout(MostDerivedClass); + MicrosoftVTableContext &VTableContext = CGM.getMicrosoftVTableContext(); + SmallVector<MSRTTIClass, 8> Classes; + serializeClassHierarchy(Classes, MostDerivedClass); + Classes.front().initialize(/*Parent=*/nullptr, /*Specifier=*/nullptr); + detectAmbiguousBases(Classes); + for (const MSRTTIClass &Class : Classes) { + // Skip any ambiguous or private bases. + if (Class.Flags & + (MSRTTIClass::IsPrivateOnPath | MSRTTIClass::IsAmbiguous)) + continue; + // Write down how to convert from a derived pointer to a base pointer. + uint32_t OffsetInVBTable = 0; + int32_t VBPtrOffset = -1; + if (Class.VirtualRoot) { + OffsetInVBTable = + VTableContext.getVBTableIndex(MostDerivedClass, Class.VirtualRoot)*4; + VBPtrOffset = MostDerivedLayout.getVBPtrOffset().getQuantity(); + } + + // Turn our record back into a pointer if the exception object is a + // pointer. + QualType RTTITy = QualType(Class.RD->getTypeForDecl(), 0); + if (IsPointer) + RTTITy = Context.getPointerType(RTTITy); + CatchableTypes.insert(getCatchableType(RTTITy, Class.OffsetInVBase, + VBPtrOffset, OffsetInVBTable)); + } + } + + // C++14 [except.handle]p3: + // A handler is a match for an exception object of type E if + // - The handler is of type cv T or cv T& and E and T are the same type + // (ignoring the top-level cv-qualifiers) + CatchableTypes.insert(getCatchableType(T)); + + // C++14 [except.handle]p3: + // A handler is a match for an exception object of type E if + // - the handler is of type cv T or const T& where T is a pointer type and + // E is a pointer type that can be converted to T by [...] + // - a standard pointer conversion (4.10) not involving conversions to + // pointers to private or protected or ambiguous classes + // + // C++14 [conv.ptr]p2: + // A prvalue of type "pointer to cv T," where T is an object type, can be + // converted to a prvalue of type "pointer to cv void". + if (IsPointer && T->getPointeeType()->isObjectType()) + CatchableTypes.insert(getCatchableType(getContext().VoidPtrTy)); + + // C++14 [except.handle]p3: + // A handler is a match for an exception object of type E if [...] + // - the handler is of type cv T or const T& where T is a pointer or + // pointer to member type and E is std::nullptr_t. + // + // We cannot possibly list all possible pointer types here, making this + // implementation incompatible with the standard. However, MSVC includes an + // entry for pointer-to-void in this case. Let's do the same. 
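// Source-level sketch of what that entry enables (assuming the
// MSVC-compatible semantics described in the comment above):
//   void f() {
//     try { throw nullptr; }   // E is std::nullptr_t
//     catch (void *p) {}       // matched via the void* CatchableType
//   }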
+ if (T->isNullPtrType()) + CatchableTypes.insert(getCatchableType(getContext().VoidPtrTy)); + + uint32_t NumEntries = CatchableTypes.size(); + llvm::Type *CTType = + getImageRelativeType(getCatchableTypeType()->getPointerTo()); + llvm::ArrayType *AT = llvm::ArrayType::get(CTType, NumEntries); + llvm::StructType *CTAType = getCatchableTypeArrayType(NumEntries); + llvm::Constant *Fields[] = { + llvm::ConstantInt::get(CGM.IntTy, NumEntries), // NumEntries + llvm::ConstantArray::get( + AT, llvm::makeArrayRef(CatchableTypes.begin(), + CatchableTypes.end())) // CatchableTypes + }; + SmallString<256> MangledName; + { + llvm::raw_svector_ostream Out(MangledName); + getMangleContext().mangleCXXCatchableTypeArray(T, NumEntries, Out); + } + CTA = new llvm::GlobalVariable( + CGM.getModule(), CTAType, /*Constant=*/true, getLinkageForRTTI(T), + llvm::ConstantStruct::get(CTAType, Fields), StringRef(MangledName)); + CTA->setUnnamedAddr(true); + CTA->setSection(".xdata"); + if (CTA->isWeakForLinker()) + CTA->setComdat(CGM.getModule().getOrInsertComdat(CTA->getName())); + return CTA; +} + +llvm::GlobalVariable *MicrosoftCXXABI::getThrowInfo(QualType T) { + bool IsConst, IsVolatile; + T = decomposeTypeForEH(getContext(), T, IsConst, IsVolatile); + + // The CatchableTypeArray enumerates the various (CV-unqualified) types that + // the exception object may be caught as. + llvm::GlobalVariable *CTA = getCatchableTypeArray(T); + // The first field in a CatchableTypeArray is the number of CatchableTypes. + // This is used as a component of the mangled name which means that we need to + // know what it is in order to see if we have previously generated the + // ThrowInfo. + uint32_t NumEntries = + cast<llvm::ConstantInt>(CTA->getInitializer()->getAggregateElement(0U)) + ->getLimitedValue(); + + SmallString<256> MangledName; + { + llvm::raw_svector_ostream Out(MangledName); + getMangleContext().mangleCXXThrowInfo(T, IsConst, IsVolatile, NumEntries, + Out); + } + + // Reuse a previously generated ThrowInfo if we have generated an appropriate + // one before. + if (llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(MangledName)) + return GV; + + // The RTTI TypeDescriptor uses an unqualified type but catch clauses must + // be at least as CV qualified. Encode this requirement into the Flags + // bitfield. + uint32_t Flags = 0; + if (IsConst) + Flags |= 1; + if (IsVolatile) + Flags |= 2; + + // The cleanup-function (a destructor) must be called when the exception + // object's lifetime ends. + llvm::Constant *CleanupFn = llvm::Constant::getNullValue(CGM.Int8PtrTy); + if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl()) + if (CXXDestructorDecl *DtorD = RD->getDestructor()) + if (!DtorD->isTrivial()) + CleanupFn = llvm::ConstantExpr::getBitCast( + CGM.getAddrOfCXXStructor(DtorD, StructorType::Complete), + CGM.Int8PtrTy); + // This is unused as far as we can tell, initialize it to null. 
+ llvm::Constant *ForwardCompat = + getImageRelativeConstant(llvm::Constant::getNullValue(CGM.Int8PtrTy)); + llvm::Constant *PointerToCatchableTypes = getImageRelativeConstant( + llvm::ConstantExpr::getBitCast(CTA, CGM.Int8PtrTy)); + llvm::StructType *TIType = getThrowInfoType(); + llvm::Constant *Fields[] = { + llvm::ConstantInt::get(CGM.IntTy, Flags), // Flags + getImageRelativeConstant(CleanupFn), // CleanupFn + ForwardCompat, // ForwardCompat + PointerToCatchableTypes // CatchableTypeArray + }; + auto *GV = new llvm::GlobalVariable( + CGM.getModule(), TIType, /*Constant=*/true, getLinkageForRTTI(T), + llvm::ConstantStruct::get(TIType, Fields), StringRef(MangledName)); + GV->setUnnamedAddr(true); + GV->setSection(".xdata"); + if (GV->isWeakForLinker()) + GV->setComdat(CGM.getModule().getOrInsertComdat(GV->getName())); + return GV; +} + +void MicrosoftCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) { + const Expr *SubExpr = E->getSubExpr(); + QualType ThrowType = SubExpr->getType(); + // The exception object lives on the stack and its address is passed to the + // runtime function. + llvm::AllocaInst *AI = CGF.CreateMemTemp(ThrowType); + CGF.EmitAnyExprToMem(SubExpr, AI, ThrowType.getQualifiers(), + /*IsInit=*/true); + + // The so-called ThrowInfo is used to describe how the exception object may be + // caught. + llvm::GlobalVariable *TI = getThrowInfo(ThrowType); + + // Call into the runtime to throw the exception. + llvm::Value *Args[] = {CGF.Builder.CreateBitCast(AI, CGM.Int8PtrTy), TI}; + CGF.EmitNoreturnRuntimeCallOrInvoke(getThrowFn(), Args); +} diff --git a/contrib/llvm/tools/clang/lib/CodeGen/ModuleBuilder.cpp b/contrib/llvm/tools/clang/lib/CodeGen/ModuleBuilder.cpp index 4f1a82e..25e5740 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/ModuleBuilder.cpp +++ b/contrib/llvm/tools/clang/lib/CodeGen/ModuleBuilder.cpp @@ -63,7 +63,7 @@ namespace { CoverageInfo(CoverageInfo), M(new llvm::Module(ModuleName, C)) {} - virtual ~CodeGeneratorImpl() { + ~CodeGeneratorImpl() override { // There should normally not be any leftover inline method definitions.
assert(DeferredInlineMethodDefinitions.empty() || Diags.hasErrorOccurred()); @@ -95,7 +95,8 @@ namespace { M->setTargetTriple(Ctx->getTargetInfo().getTriple().getTriple()); M->setDataLayout(Ctx->getTargetInfo().getTargetDescription()); - TD.reset(new llvm::DataLayout(Ctx->getTargetInfo().getTargetDescription())); + TD.reset( + new llvm::DataLayout(Ctx->getTargetInfo().getTargetDescription())); Builder.reset(new CodeGen::CodeGenModule(Context, CodeGenOpts, *M, *TD, Diags, CoverageInfo)); @@ -211,11 +212,11 @@ namespace { Builder->EmitTentativeDefinition(D); } - void HandleVTable(CXXRecordDecl *RD, bool DefinitionRequired) override { + void HandleVTable(CXXRecordDecl *RD) override { if (Diags.hasErrorOccurred()) return; - Builder->EmitVTable(RD, DefinitionRequired); + Builder->EmitVTable(RD); } void HandleLinkerOptionPragma(llvm::StringRef Opts) override { @@ -238,7 +239,6 @@ void CodeGenerator::anchor() { } CodeGenerator *clang::CreateLLVMCodeGen(DiagnosticsEngine &Diags, const std::string& ModuleName, const CodeGenOptions &CGO, - const TargetOptions &/*TO*/, llvm::LLVMContext& C, CoverageSourceInfo *CoverageInfo) { return new CodeGeneratorImpl(Diags, ModuleName, CGO, C, CoverageInfo); diff --git a/contrib/llvm/tools/clang/lib/CodeGen/SanitizerMetadata.h b/contrib/llvm/tools/clang/lib/CodeGen/SanitizerMetadata.h index d2f0651..166f0e6 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/SanitizerMetadata.h +++ b/contrib/llvm/tools/clang/lib/CodeGen/SanitizerMetadata.h @@ -31,8 +31,8 @@ namespace CodeGen { class CodeGenModule; class SanitizerMetadata { - SanitizerMetadata(const SanitizerMetadata &) LLVM_DELETED_FUNCTION; - void operator=(const SanitizerMetadata &) LLVM_DELETED_FUNCTION; + SanitizerMetadata(const SanitizerMetadata &) = delete; + void operator=(const SanitizerMetadata &) = delete; CodeGenModule &CGM; public: diff --git a/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.cpp b/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.cpp index c05b23a..53154b5 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.cpp +++ b/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.cpp @@ -37,7 +37,8 @@ static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder, unsigned LastIndex) { // Alternatively, we could emit this as a loop in the source. for (unsigned I = FirstIndex; I <= LastIndex; ++I) { - llvm::Value *Cell = Builder.CreateConstInBoundsGEP1_32(Array, I); + llvm::Value *Cell = + Builder.CreateConstInBoundsGEP1_32(Builder.getInt8Ty(), Array, I); Builder.CreateStore(Value, Cell); } } @@ -107,6 +108,10 @@ bool ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base, return false; } +bool ABIInfo::shouldSignExtUnsignedType(QualType Ty) const { + return false; +} + void ABIArgInfo::dump() const { raw_ostream &OS = llvm::errs(); OS << "(ABIArgInfo Kind="; @@ -238,7 +243,7 @@ static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) { /// \return The field declaration for the single non-empty field, if /// it exists. static const Type *isSingleElementStruct(QualType T, ASTContext &Context) { - const RecordType *RT = T->getAsStructureType(); + const RecordType *RT = T->getAs<RecordType>(); if (!RT) return nullptr; @@ -339,9 +344,15 @@ static bool canExpandIndirectArgument(QualType Ty, ASTContext &Context) { // // FIXME: This needs to be generalized to handle classes as well. const RecordDecl *RD = RT->getDecl(); - if (!RD->isStruct() || isa<CXXRecordDecl>(RD)) + if (!RD->isStruct()) return false; + // We try to expand C-like CXXRecordDecls.
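// For illustration (a sketch; "C-like" here means no bases, no virtual
// members, and no other C++-only machinery):
//   struct Pod { int x; int y; };          // C-like: may be expanded
//   struct NotPod { NotPod(); int x; };    // not C-like: never expanded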
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { + if (!CXXRD->isCLike()) + return false; + } + uint64_t Size = 0; for (const auto *FD : RD->fields()) { @@ -399,8 +410,16 @@ llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, } ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const { - if (isAggregateTypeForABI(Ty)) + Ty = useFirstFieldIfTransparentUnion(Ty); + + if (isAggregateTypeForABI(Ty)) { + // Records with non-trivial destructors/copy-constructors should not be + // passed by value. + if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) + return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory); + return ABIArgInfo::getIndirect(0); + } // Treat an enum type as its underlying type. if (const EnumType *EnumTy = Ty->getAs<EnumType>()) @@ -1352,7 +1371,8 @@ bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable( } else { // 9 is %eflags, which doesn't get a size on Darwin for some // reason. - Builder.CreateStore(Four8, Builder.CreateConstInBoundsGEP1_32(Address, 9)); + Builder.CreateStore( + Four8, Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, Address, 9)); // 11-16 are st(0..5). Not sure why we stop at 5. // These have size 12, which is sizeof(long double) on @@ -1475,14 +1495,13 @@ class X86_64ABIInfo : public ABIInfo { return !getTarget().getTriple().isOSDarwin(); } - bool HasAVX; // Some ABIs (e.g. X32 ABI and Native Client OS) use 32 bit pointers on // 64-bit hardware. bool Has64BitPointers; public: - X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, bool hasavx) : - ABIInfo(CGT), HasAVX(hasavx), + X86_64ABIInfo(CodeGen::CodeGenTypes &CGT) : + ABIInfo(CGT), Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) { } @@ -1503,6 +1522,14 @@ public: llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, CodeGenFunction &CGF) const override; + + bool has64BitPointers() const { + return Has64BitPointers; + } + + bool hasAVX() const { + return getTarget().getABI() == "avx"; + } }; /// WinX86_64ABIInfo - The Windows X86_64 ABI information. @@ -1532,10 +1559,9 @@ public: }; class X86_64TargetCodeGenInfo : public TargetCodeGenInfo { - bool HasAVX; public: - X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX) - : TargetCodeGenInfo(new X86_64ABIInfo(CGT, HasAVX)), HasAVX(HasAVX) {} + X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT) + : TargetCodeGenInfo(new X86_64ABIInfo(CGT)) {} const X86_64ABIInfo &getABIInfo() const { return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo()); @@ -1588,24 +1614,47 @@ public: llvm::Constant * getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override { - unsigned Sig = (0xeb << 0) | // jmp rel8 - (0x0a << 8) | // .+0x0c - ('F' << 16) | - ('T' << 24); + unsigned Sig; + if (getABIInfo().has64BitPointers()) + Sig = (0xeb << 0) | // jmp rel8 + (0x0a << 8) | // .+0x0c + ('F' << 16) | + ('T' << 24); + else + Sig = (0xeb << 0) | // jmp rel8 + (0x06 << 8) | // .+0x08 + ('F' << 16) | + ('T' << 24); return llvm::ConstantInt::get(CGM.Int32Ty, Sig); } unsigned getOpenMPSimdDefaultAlignment(QualType) const override { - return HasAVX ? 32 : 16; + return getABIInfo().hasAVX() ? 
32 : 16; + } +}; + +class PS4TargetCodeGenInfo : public X86_64TargetCodeGenInfo { +public: + PS4TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT) + : X86_64TargetCodeGenInfo(CGT) {} + + void getDependentLibraryOption(llvm::StringRef Lib, + llvm::SmallString<24> &Opt) const override { + Opt = "\01"; + Opt += Lib; } }; static std::string qualifyWindowsLibrary(llvm::StringRef Lib) { - // If the argument does not end in .lib, automatically add the suffix. This - // matches the behavior of MSVC. - std::string ArgStr = Lib; + // If the argument does not end in .lib, automatically add the suffix. + // If the argument contains a space, enclose it in quotes. + // This matches the behavior of MSVC. + bool Quote = (Lib.find(" ") != StringRef::npos); + std::string ArgStr = Quote ? "\"" : ""; + ArgStr += Lib; if (!Lib.endswith_lower(".lib")) ArgStr += ".lib"; + ArgStr += Quote ? "\"" : ""; return ArgStr; } @@ -1615,6 +1664,9 @@ public: bool d, bool p, bool w, unsigned RegParms) : X86_32TargetCodeGenInfo(CGT, d, p, w, RegParms) {} + void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, + CodeGen::CodeGenModule &CGM) const override; + void getDependentLibraryOption(llvm::StringRef Lib, llvm::SmallString<24> &Opt) const override { Opt = "/DEFAULTLIB:"; @@ -1628,11 +1680,35 @@ public: } }; +static void addStackProbeSizeTargetAttribute(const Decl *D, + llvm::GlobalValue *GV, + CodeGen::CodeGenModule &CGM) { + if (isa<FunctionDecl>(D)) { + if (CGM.getCodeGenOpts().StackProbeSize != 4096) { + llvm::Function *Fn = cast<llvm::Function>(GV); + + Fn->addFnAttr("stack-probe-size", llvm::utostr(CGM.getCodeGenOpts().StackProbeSize)); + } + } +} + +void WinX86_32TargetCodeGenInfo::SetTargetAttributes(const Decl *D, + llvm::GlobalValue *GV, + CodeGen::CodeGenModule &CGM) const { + X86_32TargetCodeGenInfo::SetTargetAttributes(D, GV, CGM); + + addStackProbeSizeTargetAttribute(D, GV, CGM); +} + class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo { - bool HasAVX; + bool hasAVX() const { return getABIInfo().getTarget().getABI() == "avx"; } + public: - WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX) - : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT)), HasAVX(HasAVX) {} + WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT) + : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT)) {} + + void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, + CodeGen::CodeGenModule &CGM) const override; int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override { return 7; @@ -1661,10 +1737,17 @@ public: } unsigned getOpenMPSimdDefaultAlignment(QualType) const override { - return HasAVX ? 32 : 16; + return hasAVX() ? 32 : 16; } }; +void WinX86_64TargetCodeGenInfo::SetTargetAttributes(const Decl *D, + llvm::GlobalValue *GV, + CodeGen::CodeGenModule &CGM) const { + TargetCodeGenInfo::SetTargetAttributes(D, GV, CGM); + + addStackProbeSizeTargetAttribute(D, GV, CGM); +} } void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo, @@ -1843,7 +1926,7 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase, // split. if (OffsetBase && OffsetBase != 64) Hi = Lo; - } else if (Size == 128 || (HasAVX && isNamedArg && Size == 256)) { + } else if (Size == 128 || (hasAVX() && isNamedArg && Size == 256)) { // Arguments of 256-bits are split into four eightbyte chunks. The // least significant one belongs to class SSE and all the others to class // SSEUP. 
The original Lo and Hi design considers that types can't be @@ -2065,7 +2148,7 @@ ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const { bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const { if (const VectorType *VecTy = Ty->getAs<VectorType>()) { uint64_t Size = getContext().getTypeSize(VecTy); - unsigned LargestVector = HasAVX ? 256 : 128; + unsigned LargestVector = hasAVX() ? 256 : 128; if (Size <= 64 || Size > LargestVector) return true; } @@ -2142,20 +2225,9 @@ llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const { Ty = QualType(InnerTy, 0); llvm::Type *IRType = CGT.ConvertType(Ty); - - // If the preferred type is a 16-byte vector, prefer to pass it. - if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(IRType)){ - llvm::Type *EltTy = VT->getElementType(); - unsigned BitWidth = VT->getBitWidth(); - if ((BitWidth >= 128 && BitWidth <= 256) && - (EltTy->isFloatTy() || EltTy->isDoubleTy() || - EltTy->isIntegerTy(8) || EltTy->isIntegerTy(16) || - EltTy->isIntegerTy(32) || EltTy->isIntegerTy(64) || - EltTy->isIntegerTy(128))) - return VT; - } - - return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()), 2); + assert(isa<llvm::VectorType>(IRType) && + "Trying to return a non-vector type in a vector register!"); + return IRType; } /// BitsContainNoUserData - Return true if the specified [start,end) bit range @@ -2717,8 +2789,8 @@ void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const { static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr, QualType Ty, CodeGenFunction &CGF) { - llvm::Value *overflow_arg_area_p = - CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p"); + llvm::Value *overflow_arg_area_p = CGF.Builder.CreateStructGEP( + nullptr, VAListAddr, 2, "overflow_arg_area_p"); llvm::Value *overflow_arg_area = CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area"); @@ -2798,14 +2870,16 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, llvm::Value *gp_offset_p = nullptr, *gp_offset = nullptr; llvm::Value *fp_offset_p = nullptr, *fp_offset = nullptr; if (neededInt) { - gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p"); + gp_offset_p = + CGF.Builder.CreateStructGEP(nullptr, VAListAddr, 0, "gp_offset_p"); gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset"); InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8); InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp"); } if (neededSSE) { - fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p"); + fp_offset_p = + CGF.Builder.CreateStructGEP(nullptr, VAListAddr, 1, "fp_offset_p"); fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset"); llvm::Value *FitsInFP = llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16); @@ -2833,9 +2907,8 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, // simple assembling of a structure from scattered addresses has many more // loads than necessary. Can we clean this up? llvm::Type *LTy = CGF.ConvertTypeForMem(Ty); - llvm::Value *RegAddr = - CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(VAListAddr, 3), - "reg_save_area"); + llvm::Value *RegAddr = CGF.Builder.CreateLoad( + CGF.Builder.CreateStructGEP(nullptr, VAListAddr, 3), "reg_save_area"); if (neededInt && neededSSE) { // FIXME: Cleanup. 
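// (Sketch) The mixed case below arises for an argument such as
//   struct P { long n; double d; };
// whose first eightbyte is classified INTEGER and second SSE, so one
// half is fetched from the GPR save area and the other from the XMM
// save area, and the two pieces must be reassembled into one temporary.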
assert(AI.isDirect() && "Unexpected ABI info for mixed regs"); @@ -2855,9 +2928,9 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr; llvm::Value *V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo)); - CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0)); + CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(ST, Tmp, 0)); V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi)); - CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1)); + CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(ST, Tmp, 1)); RegAddr = CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(LTy)); @@ -2894,10 +2967,10 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, Tmp = CGF.Builder.CreateBitCast(Tmp, ST->getPointerTo()); V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo, DblPtrTy)); - CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0)); + CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(ST, Tmp, 0)); V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi, DblPtrTy)); - CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1)); + CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(ST, Tmp, 1)); RegAddr = CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(LTy)); } @@ -2984,7 +3057,7 @@ ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs, return ABIArgInfo::getDirect(); } - if (RT || Ty->isMemberPointerType()) { + if (RT || Ty->isAnyComplexType() || Ty->isMemberPointerType()) { // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is // not 1, 2, 4, or 8 bytes, must be passed by reference." if (Width > 64 || !llvm::isPowerOf2_64(Width)) @@ -3040,48 +3113,6 @@ llvm::Value *WinX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, return AddrTyped; } -namespace { - -class NaClX86_64ABIInfo : public ABIInfo { - public: - NaClX86_64ABIInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX) - : ABIInfo(CGT), PInfo(CGT), NInfo(CGT, HasAVX) {} - void computeInfo(CGFunctionInfo &FI) const override; - llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, - CodeGenFunction &CGF) const override; - private: - PNaClABIInfo PInfo; // Used for generating calls with pnaclcall callingconv. - X86_64ABIInfo NInfo; // Used for everything else. -}; - -class NaClX86_64TargetCodeGenInfo : public TargetCodeGenInfo { - bool HasAVX; - public: - NaClX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX) - : TargetCodeGenInfo(new NaClX86_64ABIInfo(CGT, HasAVX)), HasAVX(HasAVX) { - } - unsigned getOpenMPSimdDefaultAlignment(QualType) const override { - return HasAVX ? 32 : 16; - } -}; - -} - -void NaClX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const { - if (FI.getASTCallingConvention() == CC_PnaclCall) - PInfo.computeInfo(FI); - else - NInfo.computeInfo(FI); -} - -llvm::Value *NaClX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, - CodeGenFunction &CGF) const { - // Always use the native convention; calling pnacl-style varargs functions - // is unuspported. - return NInfo.EmitVAArg(VAListAddr, Ty, CGF); -} - - // PowerPC-32 namespace { /// PPC32_SVR4_ABIInfo - The 32-bit PowerPC ELF (SVR4) ABI information. @@ -3257,13 +3288,42 @@ public: private: static const unsigned GPRBits = 64; ABIKind Kind; + bool HasQPX; + + // A vector of float or double will be promoted to <4 x f32> or <4 x f64> and + // will be passed in a QPX register. 
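// For instance (sketch, GNU vector-extension syntax):
//   typedef double v4df __attribute__((vector_size(32))); // 256-bit <4 x double>
//   typedef float  v4sf __attribute__((vector_size(16))); // 128-bit <4 x float>
// both pass the size checks below and travel in QPX registers; wider
// vectors fall back to the generic rules.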
+ bool IsQPXVectorTy(const Type *Ty) const { + if (!HasQPX) + return false; + + if (const VectorType *VT = Ty->getAs<VectorType>()) { + unsigned NumElements = VT->getNumElements(); + if (NumElements == 1) + return false; + + if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double)) { + if (getContext().getTypeSize(Ty) <= 256) + return true; + } else if (VT->getElementType()-> + isSpecificBuiltinType(BuiltinType::Float)) { + if (getContext().getTypeSize(Ty) <= 128) + return true; + } + } + + return false; + } + + bool IsQPXVectorTy(QualType Ty) const { + return IsQPXVectorTy(Ty.getTypePtr()); + } public: - PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind) - : DefaultABIInfo(CGT), Kind(Kind) {} + PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind, bool HasQPX) + : DefaultABIInfo(CGT), Kind(Kind), HasQPX(HasQPX) {} bool isPromotableTypeForABI(QualType Ty) const; - bool isAlignedParamType(QualType Ty) const; + bool isAlignedParamType(QualType Ty, bool &Align32) const; ABIArgInfo classifyReturnType(QualType RetTy) const; ABIArgInfo classifyArgumentType(QualType Ty) const; @@ -3288,7 +3348,8 @@ public: const Type *T = isSingleElementStruct(I.type, getContext()); if (T) { const BuiltinType *BT = T->getAs<BuiltinType>(); - if ((T->isVectorType() && getContext().getTypeSize(T) == 128) || + if (IsQPXVectorTy(T) || + (T->isVectorType() && getContext().getTypeSize(T) == 128) || (BT && BT->isFloatingPoint())) { QualType QT(T, 0); I.info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT)); @@ -3304,10 +3365,13 @@ public: }; class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo { + bool HasQPX; + public: PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT, - PPC64_SVR4_ABIInfo::ABIKind Kind) - : TargetCodeGenInfo(new PPC64_SVR4_ABIInfo(CGT, Kind)) {} + PPC64_SVR4_ABIInfo::ABIKind Kind, bool HasQPX) + : TargetCodeGenInfo(new PPC64_SVR4_ABIInfo(CGT, Kind, HasQPX)), + HasQPX(HasQPX) {} int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { // This is recovered from gcc output. @@ -3317,7 +3381,12 @@ public: bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const override; - unsigned getOpenMPSimdDefaultAlignment(QualType) const override { + unsigned getOpenMPSimdDefaultAlignment(QualType QT) const override { + if (HasQPX) + if (const PointerType *PT = QT->getAs<PointerType>()) + if (PT->getPointeeType()->isSpecificBuiltinType(BuiltinType::Double)) + return 32; // Natural alignment for QPX doubles. + return 16; // Natural alignment for Altivec and VSX vectors. } }; @@ -3370,15 +3439,23 @@ PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const { /// isAlignedParamType - Determine whether a type requires 16-byte /// alignment in the parameter area. bool -PPC64_SVR4_ABIInfo::isAlignedParamType(QualType Ty) const { +PPC64_SVR4_ABIInfo::isAlignedParamType(QualType Ty, bool &Align32) const { + Align32 = false; + // Complex types are passed just like their elements. if (const ComplexType *CTy = Ty->getAs<ComplexType>()) Ty = CTy->getElementType(); // Only vector types of size 16 bytes need alignment (larger types are // passed via reference, smaller types are not aligned). - if (Ty->isVectorType()) + if (IsQPXVectorTy(Ty)) { + if (getContext().getTypeSize(Ty) > 128) + Align32 = true; + + return true; + } else if (Ty->isVectorType()) { return getContext().getTypeSize(Ty) == 128; + } // For single-element float/vector structs, we consider the whole type // to have the same alignment requirements as its single element. 
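// e.g. (sketch) a single-element struct such as
//   struct V { __vector int v; };  // lone 128-bit Altivec member
// takes the 16-byte parameter alignment of its vector member, while
//   struct D { double d; };
// gains no extra alignment from this rule.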
@@ -3386,7 +3463,7 @@ PPC64_SVR4_ABIInfo::isAlignedParamType(QualType Ty) const { const Type *EltType = isSingleElementStruct(Ty, getContext()); if (EltType) { const BuiltinType *BT = EltType->getAs<BuiltinType>(); - if ((EltType->isVectorType() && + if (IsQPXVectorTy(EltType) || (EltType->isVectorType() && getContext().getTypeSize(EltType) == 128) || (BT && BT->isFloatingPoint())) AlignAsType = EltType; @@ -3400,13 +3477,22 @@ PPC64_SVR4_ABIInfo::isAlignedParamType(QualType Ty) const { AlignAsType = Base; // With special case aggregates, only vector base types need alignment. - if (AlignAsType) + if (AlignAsType && IsQPXVectorTy(AlignAsType)) { + if (getContext().getTypeSize(AlignAsType) > 128) + Align32 = true; + + return true; + } else if (AlignAsType) { return AlignAsType->isVectorType(); + } // Otherwise, we only need alignment for any aggregate type that // has an alignment requirement of >= 16 bytes. - if (isAggregateTypeForABI(Ty) && getContext().getTypeAlign(Ty) >= 128) + if (isAggregateTypeForABI(Ty) && getContext().getTypeAlign(Ty) >= 128) { + if (HasQPX && getContext().getTypeAlign(Ty) >= 256) + Align32 = true; return true; + } return false; } @@ -3512,7 +3598,7 @@ bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const { return true; } if (const VectorType *VT = Ty->getAs<VectorType>()) { - if (getContext().getTypeSize(VT) == 128) + if (getContext().getTypeSize(VT) == 128 || IsQPXVectorTy(Ty)) return true; } return false; @@ -3538,7 +3624,7 @@ PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const { // Non-Altivec vector types are passed in GPRs (smaller than 16 bytes) // or via reference (larger than 16 bytes). - if (Ty->isVectorType()) { + if (Ty->isVectorType() && !IsQPXVectorTy(Ty)) { uint64_t Size = getContext().getTypeSize(Ty); if (Size > 128) return ABIArgInfo::getIndirect(0, /*ByVal=*/false); @@ -3552,7 +3638,9 @@ PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const { if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory); - uint64_t ABIAlign = isAlignedParamType(Ty)? 16 : 8; + bool Align32; + uint64_t ABIAlign = isAlignedParamType(Ty, Align32) ? + (Align32 ? 32 : 16) : 8; uint64_t TyAlign = getContext().getTypeAlign(Ty) / 8; // ELFv2 homogeneous aggregates are passed as array types. @@ -3609,7 +3697,7 @@ PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const { // Non-Altivec vector types are returned in GPRs (smaller than 16 bytes) // or via reference (larger than 16 bytes). - if (RetTy->isVectorType()) { + if (RetTy->isVectorType() && !IsQPXVectorTy(RetTy)) { uint64_t Size = getContext().getTypeSize(RetTy); if (Size > 128) return ABIArgInfo::getIndirect(0); @@ -3666,10 +3754,13 @@ llvm::Value *PPC64_SVR4_ABIInfo::EmitVAArg(llvm::Value *VAListAddr, llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); // Handle types that require 16-byte alignment in the parameter save area. - if (isAlignedParamType(Ty)) { + bool Align32; + if (isAlignedParamType(Ty, Align32)) { llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty); - AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt64(15)); - AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt64(-16)); + AddrAsInt = Builder.CreateAdd(AddrAsInt, + Builder.getInt64(Align32 ? 31 : 15)); + AddrAsInt = Builder.CreateAnd(AddrAsInt, + Builder.getInt64(Align32 ? 
-32 : -16)); Addr = Builder.CreateIntToPtr(AddrAsInt, BP, "ap.align"); } @@ -3714,10 +3805,12 @@ llvm::Value *PPC64_SVR4_ABIInfo::EmitVAArg(llvm::Value *VAListAddr, ImagAddr = Builder.CreateIntToPtr(ImagAddr, PBaseTy); llvm::Value *Real = Builder.CreateLoad(RealAddr, false, ".vareal"); llvm::Value *Imag = Builder.CreateLoad(ImagAddr, false, ".vaimag"); - llvm::Value *Ptr = CGF.CreateTempAlloca(CGT.ConvertTypeForMem(Ty), - "vacplx"); - llvm::Value *RealPtr = Builder.CreateStructGEP(Ptr, 0, ".real"); - llvm::Value *ImagPtr = Builder.CreateStructGEP(Ptr, 1, ".imag"); + llvm::AllocaInst *Ptr = + CGF.CreateTempAlloca(CGT.ConvertTypeForMem(Ty), "vacplx"); + llvm::Value *RealPtr = + Builder.CreateStructGEP(Ptr->getAllocatedType(), Ptr, 0, ".real"); + llvm::Value *ImagPtr = + Builder.CreateStructGEP(Ptr->getAllocatedType(), Ptr, 1, ".imag"); Builder.CreateStore(Real, RealPtr, false); Builder.CreateStore(Imag, ImagPtr, false); return Ptr; @@ -3837,8 +3930,8 @@ private: llvm::Value *EmitAAPCSVAArg(llvm::Value *VAListAddr, QualType Ty, CodeGenFunction &CGF) const; - virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, - CodeGenFunction &CGF) const override { + llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, + CodeGenFunction &CGF) const override { return isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF) : EmitAAPCSVAArg(VAListAddr, Ty, CGF); } @@ -3849,13 +3942,15 @@ public: AArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind Kind) : TargetCodeGenInfo(new AArch64ABIInfo(CGT, Kind)) {} - StringRef getARCRetainAutoreleasedReturnValueMarker() const { + StringRef getARCRetainAutoreleasedReturnValueMarker() const override { return "mov\tfp, fp\t\t; marker for objc_retainAutoreleaseReturnValue"; } - int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const { return 31; } + int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override { + return 31; + } - virtual bool doesReturnSlotInterfereWithArgs() const { return false; } + bool doesReturnSlotInterfereWithArgs() const override { return false; } }; } @@ -3964,7 +4059,15 @@ ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy) const { // Aggregates <= 16 bytes are returned directly in registers or on the stack. uint64_t Size = getContext().getTypeSize(RetTy); if (Size <= 128) { + unsigned Alignment = getContext().getTypeAlign(RetTy); Size = 64 * ((Size + 63) / 64); // round up to multiple of 8 bytes + + // We use a pair of i64 for 16-byte aggregate with 8-byte alignment. + // For aggregates with 16-byte alignment, we use i128. + if (Alignment < 128 && Size == 128) { + llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext()); + return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64)); + } return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size)); } @@ -4047,13 +4150,15 @@ llvm::Value *AArch64ABIInfo::EmitAAPCSVAArg(llvm::Value *VAListAddr, int RegSize = IsIndirect ? 8 : getContext().getTypeSize(Ty) / 8; if (!IsFPR) { // 3 is the field number of __gr_offs - reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 3, "gr_offs_p"); + reg_offs_p = + CGF.Builder.CreateStructGEP(nullptr, VAListAddr, 3, "gr_offs_p"); reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs"); reg_top_index = 1; // field number for __gr_top RegSize = llvm::RoundUpToAlignment(RegSize, 8); } else { // 4 is the field number of __vr_offs. 
- reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 4, "vr_offs_p"); + reg_offs_p = + CGF.Builder.CreateStructGEP(nullptr, VAListAddr, 4, "vr_offs_p"); reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs"); reg_top_index = 2; // field number for __vr_top RegSize = 16 * NumRegs; @@ -4114,8 +4219,8 @@ llvm::Value *AArch64ABIInfo::EmitAAPCSVAArg(llvm::Value *VAListAddr, CGF.EmitBlock(InRegBlock); llvm::Value *reg_top_p = nullptr, *reg_top = nullptr; - reg_top_p = - CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index, "reg_top_p"); + reg_top_p = CGF.Builder.CreateStructGEP(nullptr, VAListAddr, reg_top_index, + "reg_top_p"); reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top"); llvm::Value *BaseAddr = CGF.Builder.CreateGEP(reg_top, reg_offs); llvm::Value *RegAddr = nullptr; @@ -4138,7 +4243,7 @@ llvm::Value *AArch64ABIInfo::EmitAAPCSVAArg(llvm::Value *VAListAddr, assert(!IsIndirect && "Homogeneous aggregates should be passed directly"); llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0)); llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers); - llvm::Value *Tmp = CGF.CreateTempAlloca(HFATy); + llvm::AllocaInst *Tmp = CGF.CreateTempAlloca(HFATy); int Offset = 0; if (CGF.CGM.getDataLayout().isBigEndian() && Ctx.getTypeSize(Base) < 128) @@ -4149,7 +4254,8 @@ llvm::Value *AArch64ABIInfo::EmitAAPCSVAArg(llvm::Value *VAListAddr, llvm::Value *LoadAddr = CGF.Builder.CreateGEP(BaseAddr, BaseOffset); LoadAddr = CGF.Builder.CreateBitCast( LoadAddr, llvm::PointerType::getUnqual(BaseTy)); - llvm::Value *StoreAddr = CGF.Builder.CreateStructGEP(Tmp, i); + llvm::Value *StoreAddr = + CGF.Builder.CreateStructGEP(Tmp->getAllocatedType(), Tmp, i); + llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr); CGF.Builder.CreateStore(Elem, StoreAddr); @@ -4182,7 +4288,7 @@ llvm::Value *AArch64ABIInfo::EmitAAPCSVAArg(llvm::Value *VAListAddr, CGF.EmitBlock(OnStackBlock); llvm::Value *stack_p = nullptr, *OnStackAddr = nullptr; - stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "stack_p"); + stack_p = CGF.Builder.CreateStructGEP(nullptr, VAListAddr, 0, "stack_p"); OnStackAddr = CGF.Builder.CreateLoad(stack_p, "stack"); // Again, stack arguments may need realignment.
In this case both integer and @@ -4324,17 +4430,10 @@ public: private: ABIKind Kind; - mutable int VFPRegs[16]; - const unsigned NumVFPs; - const unsigned NumGPRs; - mutable unsigned AllocatedGPRs; - mutable unsigned AllocatedVFPs; public: - ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind) : ABIInfo(CGT), Kind(_Kind), - NumVFPs(16), NumGPRs(4) { + ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind) : ABIInfo(CGT), Kind(_Kind) { setCCs(); - resetAllocatedRegs(); } bool isEABI() const { @@ -4364,8 +4463,7 @@ public: private: ABIArgInfo classifyReturnType(QualType RetTy, bool isVariadic) const; - ABIArgInfo classifyArgumentType(QualType RetTy, bool isVariadic, - bool &IsCPRC) const; + ABIArgInfo classifyArgumentType(QualType RetTy, bool isVariadic) const; bool isIllegalVectorType(QualType Ty) const; bool isHomogeneousAggregateBaseType(QualType Ty) const override; @@ -4380,10 +4478,6 @@ private: llvm::CallingConv::ID getLLVMDefaultCC() const; llvm::CallingConv::ID getABIDefaultCC() const; void setCCs(); - - void markAllocatedGPRs(unsigned Alignment, unsigned NumRequired) const; - void markAllocatedVFPs(unsigned Alignment, unsigned NumRequired) const; - void resetAllocatedRegs(void) const; }; class ARMTargetCodeGenInfo : public TargetCodeGenInfo { @@ -4456,55 +4550,43 @@ public: } }; +class WindowsARMTargetCodeGenInfo : public ARMTargetCodeGenInfo { + void addStackProbeSizeTargetAttribute(const Decl *D, llvm::GlobalValue *GV, + CodeGen::CodeGenModule &CGM) const; + +public: + WindowsARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K) + : ARMTargetCodeGenInfo(CGT, K) {} + + void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, + CodeGen::CodeGenModule &CGM) const override; +}; + +void WindowsARMTargetCodeGenInfo::addStackProbeSizeTargetAttribute( + const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const { + if (!isa<FunctionDecl>(D)) + return; + if (CGM.getCodeGenOpts().StackProbeSize == 4096) + return; + + llvm::Function *F = cast<llvm::Function>(GV); + F->addFnAttr("stack-probe-size", + llvm::utostr(CGM.getCodeGenOpts().StackProbeSize)); +} + +void WindowsARMTargetCodeGenInfo::SetTargetAttributes( + const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const { + ARMTargetCodeGenInfo::SetTargetAttributes(D, GV, CGM); + addStackProbeSizeTargetAttribute(D, GV, CGM); +} } void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const { - // To correctly handle Homogeneous Aggregate, we need to keep track of the - // VFP registers allocated so far. - // C.1.vfp If the argument is a VFP CPRC and there are sufficient consecutive - // VFP registers of the appropriate type unallocated then the argument is - // allocated to the lowest-numbered sequence of such registers. - // C.2.vfp If the argument is a VFP CPRC then any VFP registers that are - // unallocated are marked as unavailable. - resetAllocatedRegs(); - - if (getCXXABI().classifyReturnType(FI)) { - if (FI.getReturnInfo().isIndirect()) - markAllocatedGPRs(1, 1); - } else { + if (!getCXXABI().classifyReturnType(FI)) FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), FI.isVariadic()); - } - for (auto &I : FI.arguments()) { - unsigned PreAllocationVFPs = AllocatedVFPs; - unsigned PreAllocationGPRs = AllocatedGPRs; - bool IsCPRC = false; - // 6.1.2.3 There is one VFP co-processor register class using registers - // s0-s15 (d0-d7) for passing arguments. 
- I.info = classifyArgumentType(I.type, FI.isVariadic(), IsCPRC); - - // If we have allocated some arguments onto the stack (due to running - // out of VFP registers), we cannot split an argument between GPRs and - // the stack. If this situation occurs, we add padding to prevent the - // GPRs from being used. In this situation, the current argument could - // only be allocated by rule C.8, so rule C.6 would mark these GPRs as - // unusable anyway. - // We do not have to do this if the argument is being passed ByVal, as the - // backend can handle that situation correctly. - const bool StackUsed = PreAllocationGPRs > NumGPRs || PreAllocationVFPs > NumVFPs; - const bool IsByVal = I.info.isIndirect() && I.info.getIndirectByVal(); - if (!IsCPRC && PreAllocationGPRs < NumGPRs && AllocatedGPRs > NumGPRs && - StackUsed && !IsByVal) { - llvm::Type *PaddingTy = llvm::ArrayType::get( - llvm::Type::getInt32Ty(getVMContext()), NumGPRs - PreAllocationGPRs); - if (I.info.canHaveCoerceToType()) { - I.info = ABIArgInfo::getDirect(I.info.getCoerceToType() /* type */, - 0 /* offset */, PaddingTy, true); - } else { - I.info = ABIArgInfo::getDirect(nullptr /* type */, 0 /* offset */, - PaddingTy, true); - } - } - } + + for (auto &I : FI.arguments()) + I.info = classifyArgumentType(I.type, FI.isVariadic()); // Always honor user-specified calling convention. if (FI.getCallingConvention() != llvm::CallingConv::C) @@ -4512,7 +4594,7 @@ void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const { llvm::CallingConv::ID cc = getRuntimeCC(); if (cc != llvm::CallingConv::C) - FI.setEffectiveCallingConvention(cc); + FI.setEffectiveCallingConvention(cc); } /// Return the default calling convention that LLVM will use. @@ -4550,64 +4632,8 @@ void ARMABIInfo::setCCs() { llvm::CallingConv::ARM_APCS : llvm::CallingConv::ARM_AAPCS); } -/// markAllocatedVFPs - update VFPRegs according to the alignment and -/// number of VFP registers (unit is S register) requested. -void ARMABIInfo::markAllocatedVFPs(unsigned Alignment, - unsigned NumRequired) const { - // Early Exit. - if (AllocatedVFPs >= 16) { - // We use AllocatedVFP > 16 to signal that some CPRCs were allocated on - // the stack. - AllocatedVFPs = 17; - return; - } - // C.1.vfp If the argument is a VFP CPRC and there are sufficient consecutive - // VFP registers of the appropriate type unallocated then the argument is - // allocated to the lowest-numbered sequence of such registers. - for (unsigned I = 0; I < 16; I += Alignment) { - bool FoundSlot = true; - for (unsigned J = I, JEnd = I + NumRequired; J < JEnd; J++) - if (J >= 16 || VFPRegs[J]) { - FoundSlot = false; - break; - } - if (FoundSlot) { - for (unsigned J = I, JEnd = I + NumRequired; J < JEnd; J++) - VFPRegs[J] = 1; - AllocatedVFPs += NumRequired; - return; - } - } - // C.2.vfp If the argument is a VFP CPRC then any VFP registers that are - // unallocated are marked as unavailable. - for (unsigned I = 0; I < 16; I++) - VFPRegs[I] = 1; - AllocatedVFPs = 17; // We do not have enough VFP registers. -} - -/// Update AllocatedGPRs to record the number of general purpose registers -/// which have been allocated. It is valid for AllocatedGPRs to go above 4, -/// this represents arguments being stored on the stack. 
-void ARMABIInfo::markAllocatedGPRs(unsigned Alignment, - unsigned NumRequired) const { - assert((Alignment == 1 || Alignment == 2) && "Alignment must be 4 or 8 bytes"); - - if (Alignment == 2 && AllocatedGPRs & 0x1) - AllocatedGPRs += 1; - - AllocatedGPRs += NumRequired; -} - -void ARMABIInfo::resetAllocatedRegs(void) const { - AllocatedGPRs = 0; - AllocatedVFPs = 0; - for (unsigned i = 0; i < NumVFPs; ++i) - VFPRegs[i] = 0; -} - -ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, bool isVariadic, - bool &IsCPRC) const { - // We update number of allocated VFPs according to +ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, + bool isVariadic) const { // 6.1.2.1 The following argument types are VFP CPRCs: // A single-precision floating-point type (including promoted // half-precision types); A double-precision floating-point type; @@ -4625,58 +4651,20 @@ ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, bool isVariadic, if (Size <= 32) { llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext()); - markAllocatedGPRs(1, 1); return ABIArgInfo::getDirect(ResType); } if (Size == 64) { llvm::Type *ResType = llvm::VectorType::get( llvm::Type::getInt32Ty(getVMContext()), 2); - if (getABIKind() == ARMABIInfo::AAPCS || isVariadic){ - markAllocatedGPRs(2, 2); - } else { - markAllocatedVFPs(2, 2); - IsCPRC = true; - } return ABIArgInfo::getDirect(ResType); } if (Size == 128) { llvm::Type *ResType = llvm::VectorType::get( llvm::Type::getInt32Ty(getVMContext()), 4); - if (getABIKind() == ARMABIInfo::AAPCS || isVariadic) { - markAllocatedGPRs(2, 4); - } else { - markAllocatedVFPs(4, 4); - IsCPRC = true; - } return ABIArgInfo::getDirect(ResType); } - markAllocatedGPRs(1, 1); return ABIArgInfo::getIndirect(0, /*ByVal=*/false); } - // Update VFPRegs for legal vector types. - if (getABIKind() == ARMABIInfo::AAPCS_VFP && !isVariadic) { - if (const VectorType *VT = Ty->getAs<VectorType>()) { - uint64_t Size = getContext().getTypeSize(VT); - // Size of a legal vector should be power of 2 and above 64. - markAllocatedVFPs(Size >= 128 ? 4 : 2, Size / 32); - IsCPRC = true; - } - } - // Update VFPRegs for floating point types. - if (getABIKind() == ARMABIInfo::AAPCS_VFP && !isVariadic) { - if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) { - if (BT->getKind() == BuiltinType::Half || - BT->getKind() == BuiltinType::Float) { - markAllocatedVFPs(1, 1); - IsCPRC = true; - } - if (BT->getKind() == BuiltinType::Double || - BT->getKind() == BuiltinType::LongDouble) { - markAllocatedVFPs(2, 2); - IsCPRC = true; - } - } - } if (!isAggregateTypeForABI(Ty)) { // Treat an enum type as its underlying type. @@ -4684,15 +4672,11 @@ ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, bool isVariadic, Ty = EnumTy->getDecl()->getIntegerType(); } - unsigned Size = getContext().getTypeSize(Ty); - if (!IsCPRC) - markAllocatedGPRs(Size > 32 ? 2 : 1, (Size + 31) / 32); return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); } if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) { - markAllocatedGPRs(1, 1); return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory); } @@ -4708,19 +4692,6 @@ ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, bool isVariadic, if (isHomogeneousAggregate(Ty, Base, Members)) { assert(Base && "Base class should be set for homogeneous aggregate"); // Base can be a floating-point or a vector. - if (Base->isVectorType()) { - // ElementSize is in number of floats. 
- unsigned ElementSize = getContext().getTypeSize(Base) == 64 ? 2 : 4; - markAllocatedVFPs(ElementSize, - Members * ElementSize); - } else if (Base->isSpecificBuiltinType(BuiltinType::Float)) - markAllocatedVFPs(1, Members); - else { - assert(Base->isSpecificBuiltinType(BuiltinType::Double) || - Base->isSpecificBuiltinType(BuiltinType::LongDouble)); - markAllocatedVFPs(2, Members * 2); - } - IsCPRC = true; return ABIArgInfo::getDirect(nullptr, 0, nullptr, false); } } @@ -4732,15 +4703,11 @@ ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, bool isVariadic, uint64_t ABIAlign = 4; uint64_t TyAlign = getContext().getTypeAlign(Ty) / 8; if (getABIKind() == ARMABIInfo::AAPCS_VFP || - getABIKind() == ARMABIInfo::AAPCS) + getABIKind() == ARMABIInfo::AAPCS) ABIAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8); + if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64)) { - // Update Allocated GPRs. Since this is only used when the size of the - // argument is greater than 64 bytes, this will always use up any available - // registers (of which there are 4). We also don't care about getting the - // alignment right, because general-purpose registers cannot be back-filled. - markAllocatedGPRs(1, 4); - return ABIArgInfo::getIndirect(TyAlign, /*ByVal=*/true, + return ABIArgInfo::getIndirect(ABIAlign, /*ByVal=*/true, /*Realign=*/TyAlign > ABIAlign); } @@ -4752,11 +4719,9 @@ ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, bool isVariadic, if (getContext().getTypeAlign(Ty) <= 32) { ElemTy = llvm::Type::getInt32Ty(getVMContext()); SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32; - markAllocatedGPRs(1, SizeRegs); } else { ElemTy = llvm::Type::getInt64Ty(getVMContext()); SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64; - markAllocatedGPRs(2, SizeRegs * 2); } return ABIArgInfo::getDirect(llvm::ArrayType::get(ElemTy, SizeRegs)); @@ -4856,7 +4821,6 @@ ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy, // Large vector types should be returned via memory. if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128) { - markAllocatedGPRs(1, 1); return ABIArgInfo::getIndirect(0); } @@ -4894,7 +4858,6 @@ ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy, } // Otherwise return in memory. - markAllocatedGPRs(1, 1); return ABIArgInfo::getIndirect(0); } @@ -4930,7 +4893,6 @@ ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy, return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext())); } - markAllocatedGPRs(1, 1); return ABIArgInfo::getIndirect(0); } @@ -5046,42 +5008,6 @@ llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, return AddrTyped; } -namespace { - -class NaClARMABIInfo : public ABIInfo { - public: - NaClARMABIInfo(CodeGen::CodeGenTypes &CGT, ARMABIInfo::ABIKind Kind) - : ABIInfo(CGT), PInfo(CGT), NInfo(CGT, Kind) {} - void computeInfo(CGFunctionInfo &FI) const override; - llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, - CodeGenFunction &CGF) const override; - private: - PNaClABIInfo PInfo; // Used for generating calls with pnaclcall callingconv. - ARMABIInfo NInfo; // Used for everything else. 
-}; - -class NaClARMTargetCodeGenInfo : public TargetCodeGenInfo { - public: - NaClARMTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, ARMABIInfo::ABIKind Kind) - : TargetCodeGenInfo(new NaClARMABIInfo(CGT, Kind)) {} -}; - -} - -void NaClARMABIInfo::computeInfo(CGFunctionInfo &FI) const { - if (FI.getASTCallingConvention() == CC_PnaclCall) - PInfo.computeInfo(FI); - else - static_cast<const ABIInfo&>(NInfo).computeInfo(FI); -} - -llvm::Value *NaClARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, - CodeGenFunction &CGF) const { - // Always use the native convention; calling pnacl-style varargs functions - // is unsupported. - return static_cast<const ABIInfo&>(NInfo).EmitVAArg(VAListAddr, Ty, CGF); -} - //===----------------------------------------------------------------------===// // NVPTX ABI Implementation //===----------------------------------------------------------------------===// @@ -5190,18 +5116,22 @@ SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV, // Create !{<func-ref>, metadata !"kernel", i32 1} node addNVVMMetadata(F, "kernel", 1); } - if (FD->hasAttr<CUDALaunchBoundsAttr>()) { + if (CUDALaunchBoundsAttr *Attr = FD->getAttr<CUDALaunchBoundsAttr>()) { // Create !{<func-ref>, metadata !"maxntidx", i32 <val>} node - addNVVMMetadata(F, "maxntidx", - FD->getAttr<CUDALaunchBoundsAttr>()->getMaxThreads()); - // min blocks is a default argument for CUDALaunchBoundsAttr, so getting a - // zero value from getMinBlocks either means it was not specified in - // __launch_bounds__ or the user specified a 0 value. In both cases, we - // don't have to add a PTX directive. - int MinCTASM = FD->getAttr<CUDALaunchBoundsAttr>()->getMinBlocks(); - if (MinCTASM > 0) { - // Create !{<func-ref>, metadata !"minctasm", i32 <val>} node - addNVVMMetadata(F, "minctasm", MinCTASM); + llvm::APSInt MaxThreads(32); + MaxThreads = Attr->getMaxThreads()->EvaluateKnownConstInt(M.getContext()); + if (MaxThreads > 0) + addNVVMMetadata(F, "maxntidx", MaxThreads.getExtValue()); + + // min blocks is an optional argument for CUDALaunchBoundsAttr. If it was + // not specified in __launch_bounds__ or if the user specified a 0 value, + // we don't have to add a PTX directive. 
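// For example (sketch):
//   __global__ void __launch_bounds__(256, 2) k1(); // maxntidx=256, minctasm=2
//   __global__ void __launch_bounds__(256) k2();    // maxntidx=256, minctasm omitted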
+ if (Attr->getMinBlocks()) { + llvm::APSInt MinBlocks(32); + MinBlocks = Attr->getMinBlocks()->EvaluateKnownConstInt(M.getContext()); + if (MinBlocks > 0) + // Create !{<func-ref>, metadata !"minctasm", i32 <val>} node + addNVVMMetadata(F, "minctasm", MinBlocks.getExtValue()); } } } @@ -5231,12 +5161,17 @@ void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::Function *F, StringRef Name, namespace { class SystemZABIInfo : public ABIInfo { + bool HasVector; + public: - SystemZABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {} + SystemZABIInfo(CodeGenTypes &CGT, bool HV) + : ABIInfo(CGT), HasVector(HV) {} bool isPromotableIntegerType(QualType Ty) const; bool isCompoundType(QualType Ty) const; + bool isVectorArgumentType(QualType Ty) const; bool isFPArgumentType(QualType Ty) const; + QualType GetSingleElementType(QualType Ty) const; ABIArgInfo classifyReturnType(QualType RetTy) const; ABIArgInfo classifyArgumentType(QualType ArgTy) const; @@ -5254,8 +5189,8 @@ public: class SystemZTargetCodeGenInfo : public TargetCodeGenInfo { public: - SystemZTargetCodeGenInfo(CodeGenTypes &CGT) - : TargetCodeGenInfo(new SystemZABIInfo(CGT)) {} + SystemZTargetCodeGenInfo(CodeGenTypes &CGT, bool HasVector) + : TargetCodeGenInfo(new SystemZABIInfo(CGT, HasVector)) {} }; } @@ -5282,7 +5217,15 @@ bool SystemZABIInfo::isPromotableIntegerType(QualType Ty) const { } bool SystemZABIInfo::isCompoundType(QualType Ty) const { - return Ty->isAnyComplexType() || isAggregateTypeForABI(Ty); + return (Ty->isAnyComplexType() || + Ty->isVectorType() || + isAggregateTypeForABI(Ty)); +} + +bool SystemZABIInfo::isVectorArgumentType(QualType Ty) const { + return (HasVector && + Ty->isVectorType() && + getContext().getTypeSize(Ty) <= 128); } bool SystemZABIInfo::isFPArgumentType(QualType Ty) const { @@ -5295,9 +5238,13 @@ bool SystemZABIInfo::isFPArgumentType(QualType Ty) const { return false; } + return false; +} + +QualType SystemZABIInfo::GetSingleElementType(QualType Ty) const { if (const RecordType *RT = Ty->getAsStructureType()) { const RecordDecl *RD = RT->getDecl(); - bool Found = false; + QualType Found; // If this is a C++ record, check the bases first. if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) @@ -5308,36 +5255,34 @@ bool SystemZABIInfo::isFPArgumentType(QualType Ty) const { if (isEmptyRecord(getContext(), Base, true)) continue; - if (Found) - return false; - Found = isFPArgumentType(Base); - if (!Found) - return false; + if (!Found.isNull()) + return Ty; + Found = GetSingleElementType(Base); } // Check the fields. for (const auto *FD : RD->fields()) { - // Empty bitfields don't affect things either way. + // For compatibility with GCC, ignore empty bitfields in C++ mode. // Unlike isSingleElementStruct(), empty structure and array fields // do count. So do anonymous bitfields that aren't zero-sized. - if (FD->isBitField() && FD->getBitWidthValue(getContext()) == 0) - return true; + if (getContext().getLangOpts().CPlusPlus && + FD->isBitField() && FD->getBitWidthValue(getContext()) == 0) + continue; // Unlike isSingleElementStruct(), arrays do not count. - // Nested isFPArgumentType structures still do though. - if (Found) - return false; - Found = isFPArgumentType(FD->getType()); - if (!Found) - return false; + // Nested structures still do though. + if (!Found.isNull()) + return Ty; + Found = GetSingleElementType(FD->getType()); } // Unlike isSingleElementStruct(), trailing padding is allowed. // An 8-byte aligned struct s { float f; } is passed as a double. 
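// Worked examples for the recursion above (illustration only, not from the
// patch):
//
//   struct Inner { float f; };
//   struct Outer { Inner i; };     // GetSingleElementType(Outer) == float
//   struct Pair  { float a, b; };  // second field seen -> returns Pair itself
//
// Outer is therefore eligible for the FPR path in classifyArgumentType(),
// while Pair falls through to the integer/indirect handling.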
- return Found; + if (!Found.isNull()) + return Found; } - return false; + return Ty; } llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, @@ -5350,26 +5295,56 @@ llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, // i8 *__reg_save_area; // }; - // Every argument occupies 8 bytes and is passed by preference in either - // GPRs or FPRs. + // Every non-vector argument occupies 8 bytes and is passed by preference + // in either GPRs or FPRs. Vector arguments occupy 8 or 16 bytes and are + // always passed on the stack. Ty = CGF.getContext().getCanonicalType(Ty); + llvm::Type *ArgTy = CGF.ConvertTypeForMem(Ty); + llvm::Type *APTy = llvm::PointerType::getUnqual(ArgTy); ABIArgInfo AI = classifyArgumentType(Ty); - bool InFPRs = isFPArgumentType(Ty); - - llvm::Type *APTy = llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty)); bool IsIndirect = AI.isIndirect(); + bool InFPRs = false; + bool IsVector = false; unsigned UnpaddedBitSize; if (IsIndirect) { APTy = llvm::PointerType::getUnqual(APTy); UnpaddedBitSize = 64; - } else + } else { + if (AI.getCoerceToType()) + ArgTy = AI.getCoerceToType(); + InFPRs = ArgTy->isFloatTy() || ArgTy->isDoubleTy(); + IsVector = ArgTy->isVectorTy(); UnpaddedBitSize = getContext().getTypeSize(Ty); - unsigned PaddedBitSize = 64; + } + unsigned PaddedBitSize = (IsVector && UnpaddedBitSize > 64) ? 128 : 64; assert((UnpaddedBitSize <= PaddedBitSize) && "Invalid argument size."); unsigned PaddedSize = PaddedBitSize / 8; unsigned Padding = (PaddedBitSize - UnpaddedBitSize) / 8; + llvm::Type *IndexTy = CGF.Int64Ty; + llvm::Value *PaddedSizeV = llvm::ConstantInt::get(IndexTy, PaddedSize); + + if (IsVector) { + // Work out the address of a vector argument on the stack. + // Vector arguments are always passed in the high bits of a + // single (8 byte) or double (16 byte) stack slot. + llvm::Value *OverflowArgAreaPtr = + CGF.Builder.CreateStructGEP(nullptr, VAListAddr, 2, + "overflow_arg_area_ptr"); + llvm::Value *OverflowArgArea = + CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"); + llvm::Value *MemAddr = + CGF.Builder.CreateBitCast(OverflowArgArea, APTy, "mem_addr"); + + // Update overflow_arg_area_ptr pointer + llvm::Value *NewOverflowArgArea = + CGF.Builder.CreateGEP(OverflowArgArea, PaddedSizeV, "overflow_arg_area"); + CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr); + + return MemAddr; + } + unsigned MaxRegs, RegCountField, RegSaveIndex, RegPadding; if (InFPRs) { MaxRegs = 4; // Maximum of 4 FPR arguments @@ -5383,10 +5358,9 @@ llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, RegPadding = Padding; // values are passed in the low bits of a GPR } - llvm::Value *RegCountPtr = - CGF.Builder.CreateStructGEP(VAListAddr, RegCountField, "reg_count_ptr"); + llvm::Value *RegCountPtr = CGF.Builder.CreateStructGEP( + nullptr, VAListAddr, RegCountField, "reg_count_ptr"); llvm::Value *RegCount = CGF.Builder.CreateLoad(RegCountPtr, "reg_count"); - llvm::Type *IndexTy = RegCount->getType(); llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs); llvm::Value *InRegs = CGF.Builder.CreateICmpULT(RegCount, MaxRegsV, "fits_in_regs"); @@ -5400,7 +5374,6 @@ llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, CGF.EmitBlock(InRegBlock); // Work out the address of an argument register. 
- llvm::Value *PaddedSizeV = llvm::ConstantInt::get(IndexTy, PaddedSize); llvm::Value *ScaledRegCount = CGF.Builder.CreateMul(RegCount, PaddedSizeV, "scaled_reg_count"); llvm::Value *RegBase = @@ -5408,7 +5381,7 @@ llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, llvm::Value *RegOffset = CGF.Builder.CreateAdd(ScaledRegCount, RegBase, "reg_offset"); llvm::Value *RegSaveAreaPtr = - CGF.Builder.CreateStructGEP(VAListAddr, 3, "reg_save_area_ptr"); + CGF.Builder.CreateStructGEP(nullptr, VAListAddr, 3, "reg_save_area_ptr"); llvm::Value *RegSaveArea = CGF.Builder.CreateLoad(RegSaveAreaPtr, "reg_save_area"); llvm::Value *RawRegAddr = @@ -5427,8 +5400,8 @@ llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, CGF.EmitBlock(InMemBlock); // Work out the address of a stack argument. - llvm::Value *OverflowArgAreaPtr = - CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_ptr"); + llvm::Value *OverflowArgAreaPtr = CGF.Builder.CreateStructGEP( + nullptr, VAListAddr, 2, "overflow_arg_area_ptr"); llvm::Value *OverflowArgArea = CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"); llvm::Value *PaddingV = llvm::ConstantInt::get(IndexTy, Padding); @@ -5458,6 +5431,8 @@ llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const { if (RetTy->isVoidType()) return ABIArgInfo::getIgnore(); + if (isVectorArgumentType(RetTy)) + return ABIArgInfo::getDirect(); if (isCompoundType(RetTy) || getContext().getTypeSize(RetTy) > 64) return ABIArgInfo::getIndirect(0); return (isPromotableIntegerType(RetTy) ? @@ -5473,8 +5448,16 @@ ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const { if (isPromotableIntegerType(Ty)) return ABIArgInfo::getExtend(); - // Values that are not 1, 2, 4 or 8 bytes in size are passed indirectly. + // Handle vector types and vector-like structure types. Note that + // as opposed to float-like structure types, we do not allow any + // padding for vector-like structures, so verify the sizes match. uint64_t Size = getContext().getTypeSize(Ty); + QualType SingleElementTy = GetSingleElementType(Ty); + if (isVectorArgumentType(SingleElementTy) && + getContext().getTypeSize(SingleElementTy) == Size) + return ABIArgInfo::getDirect(CGT.ConvertType(SingleElementTy)); + + // Values that are not 1, 2, 4 or 8 bytes in size are passed indirectly. if (Size != 8 && Size != 16 && Size != 32 && Size != 64) return ABIArgInfo::getIndirect(0, /*ByVal=*/false); @@ -5488,7 +5471,7 @@ ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const { // The structure is passed as an unextended integer, a float, or a double. llvm::Type *PassTy; - if (isFPArgumentType(Ty)) { + if (isFPArgumentType(SingleElementTy)) { assert(Size == 32 || Size == 64); if (Size == 32) PassTy = llvm::Type::getFloatTy(getVMContext()); @@ -5568,6 +5551,7 @@ public: void computeInfo(CGFunctionInfo &FI) const override; llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, CodeGenFunction &CGF) const override; + bool shouldSignExtUnsignedType(QualType Ty) const override; }; class MIPSTargetCodeGenInfo : public TargetCodeGenInfo { @@ -5870,6 +5854,16 @@ llvm::Value* MipsABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, return AddrTyped; } +bool MipsABIInfo::shouldSignExtUnsignedType(QualType Ty) const { + int TySize = getContext().getTypeSize(Ty); + + // MIPS64 ABI requires unsigned 32 bit integers to be sign extended. 
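// (Illustration, not from the patch: under the MIPS N64 convention a uint32_t
// argument with value 0x80000000 is passed in a 64-bit register as
// 0xFFFFFFFF80000000, i.e. sign-extended even though the C type is unsigned;
// the check below tells the caller to emit exactly that extension.)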
+ if (Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32) + return true; + + return false; +} + bool MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const { @@ -6264,12 +6258,7 @@ private: // Check if Ty is a usable substitute for the coercion type. bool isUsableType(llvm::StructType *Ty) const { - if (Ty->getNumElements() != Elems.size()) - return false; - for (unsigned i = 0, e = Elems.size(); i != e; ++i) - if (Elems[i] != Ty->getElementType(i)) - return false; - return true; + return llvm::makeArrayRef(Elems) == Ty->elements(); } // Get the coercion type as a literal struct type. @@ -7102,6 +7091,12 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() { case llvm::Triple::thumb: case llvm::Triple::thumbeb: { + if (Triple.getOS() == llvm::Triple::Win32) { + TheTargetCodeGenInfo = + new WindowsARMTargetCodeGenInfo(Types, ARMABIInfo::AAPCS_VFP); + return *TheTargetCodeGenInfo; + } + ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS; if (getTarget().getABI() == "apcs-gnu") Kind = ARMABIInfo::APCS; @@ -7110,14 +7105,7 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() { Triple.getEnvironment() == llvm::Triple::GNUEABIHF)) Kind = ARMABIInfo::AAPCS_VFP; - switch (Triple.getOS()) { - case llvm::Triple::NaCl: - return *(TheTargetCodeGenInfo = - new NaClARMTargetCodeGenInfo(Types, Kind)); - default: - return *(TheTargetCodeGenInfo = - new ARMTargetCodeGenInfo(Types, Kind)); - } + return *(TheTargetCodeGenInfo = new ARMTargetCodeGenInfo(Types, Kind)); } case llvm::Triple::ppc: @@ -7127,19 +7115,21 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() { PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv1; if (getTarget().getABI() == "elfv2") Kind = PPC64_SVR4_ABIInfo::ELFv2; + bool HasQPX = getTarget().getABI() == "elfv1-qpx"; return *(TheTargetCodeGenInfo = - new PPC64_SVR4_TargetCodeGenInfo(Types, Kind)); + new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX)); } else return *(TheTargetCodeGenInfo = new PPC64TargetCodeGenInfo(Types)); case llvm::Triple::ppc64le: { assert(Triple.isOSBinFormatELF() && "PPC64 LE non-ELF not supported!"); PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv2; - if (getTarget().getABI() == "elfv1") + if (getTarget().getABI() == "elfv1" || getTarget().getABI() == "elfv1-qpx") Kind = PPC64_SVR4_ABIInfo::ELFv1; + bool HasQPX = getTarget().getABI() == "elfv1-qpx"; return *(TheTargetCodeGenInfo = - new PPC64_SVR4_TargetCodeGenInfo(Types, Kind)); + new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX)); } case llvm::Triple::nvptx: @@ -7149,8 +7139,11 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() { case llvm::Triple::msp430: return *(TheTargetCodeGenInfo = new MSP430TargetCodeGenInfo(Types)); - case llvm::Triple::systemz: - return *(TheTargetCodeGenInfo = new SystemZTargetCodeGenInfo(Types)); + case llvm::Triple::systemz: { + bool HasVector = getTarget().getABI() == "vector"; + return *(TheTargetCodeGenInfo = new SystemZTargetCodeGenInfo(Types, + HasVector)); + } case llvm::Triple::tce: return *(TheTargetCodeGenInfo = new TCETargetCodeGenInfo(Types)); @@ -7177,18 +7170,13 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() { } case llvm::Triple::x86_64: { - bool HasAVX = getTarget().getABI() == "avx"; - switch (Triple.getOS()) { case llvm::Triple::Win32: - return *(TheTargetCodeGenInfo = - new WinX86_64TargetCodeGenInfo(Types, HasAVX)); - case llvm::Triple::NaCl: - return *(TheTargetCodeGenInfo = - new 
NaClX86_64TargetCodeGenInfo(Types, HasAVX)); + return *(TheTargetCodeGenInfo = new WinX86_64TargetCodeGenInfo(Types)); + case llvm::Triple::PS4: + return *(TheTargetCodeGenInfo = new PS4TargetCodeGenInfo(Types)); default: - return *(TheTargetCodeGenInfo = - new X86_64TargetCodeGenInfo(Types, HasAVX)); + return *(TheTargetCodeGenInfo = new X86_64TargetCodeGenInfo(Types)); } } case llvm::Triple::hexagon: diff --git a/contrib/llvm/tools/clang/lib/Driver/CrossWindowsToolChain.cpp b/contrib/llvm/tools/clang/lib/Driver/CrossWindowsToolChain.cpp index 03fe41b..82456e7 100644 --- a/contrib/llvm/tools/clang/lib/Driver/CrossWindowsToolChain.cpp +++ b/contrib/llvm/tools/clang/lib/Driver/CrossWindowsToolChain.cpp @@ -60,7 +60,7 @@ AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs, if (!DriverArgs.hasArg(options::OPT_nobuiltininc)) { SmallString<128> ResourceDir(D.ResourceDir); llvm::sys::path::append(ResourceDir, "include"); - addSystemInclude(DriverArgs, CC1Args, ResourceDir.str()); + addSystemInclude(DriverArgs, CC1Args, ResourceDir); } addExternCSystemInclude(DriverArgs, CC1Args, SysRoot + "/usr/include"); } diff --git a/contrib/llvm/tools/clang/lib/Driver/Driver.cpp b/contrib/llvm/tools/clang/lib/Driver/Driver.cpp index 1664d0d..65d0049 100644 --- a/contrib/llvm/tools/clang/lib/Driver/Driver.cpp +++ b/contrib/llvm/tools/clang/lib/Driver/Driver.cpp @@ -17,6 +17,7 @@ #include "clang/Driver/DriverDiagnostic.h" #include "clang/Driver/Job.h" #include "clang/Driver/Options.h" +#include "clang/Driver/SanitizerArgs.h" #include "clang/Driver/Tool.h" #include "clang/Driver/ToolChain.h" #include "llvm/ADT/ArrayRef.h" @@ -44,21 +45,19 @@ using namespace clang::driver; using namespace clang; using namespace llvm::opt; -Driver::Driver(StringRef ClangExecutable, - StringRef DefaultTargetTriple, +Driver::Driver(StringRef ClangExecutable, StringRef DefaultTargetTriple, DiagnosticsEngine &Diags) - : Opts(createDriverOptTable()), Diags(Diags), Mode(GCCMode), - ClangExecutable(ClangExecutable), SysRoot(DEFAULT_SYSROOT), - UseStdLib(true), DefaultTargetTriple(DefaultTargetTriple), - DriverTitle("clang LLVM compiler"), - CCPrintOptionsFilename(nullptr), CCPrintHeadersFilename(nullptr), - CCLogDiagnosticsFilename(nullptr), - CCCPrintBindings(false), - CCPrintHeaders(false), CCLogDiagnostics(false), - CCGenDiagnostics(false), CCCGenericGCCName(""), CheckInputsExist(true), - CCCUsePCH(true), SuppressMissingInputWarning(false) { - - Name = llvm::sys::path::stem(ClangExecutable); + : Opts(createDriverOptTable()), Diags(Diags), Mode(GCCMode), + SaveTemps(SaveTempsNone), ClangExecutable(ClangExecutable), + SysRoot(DEFAULT_SYSROOT), UseStdLib(true), + DefaultTargetTriple(DefaultTargetTriple), + DriverTitle("clang LLVM compiler"), CCPrintOptionsFilename(nullptr), + CCPrintHeadersFilename(nullptr), CCLogDiagnosticsFilename(nullptr), + CCCPrintBindings(false), CCPrintHeaders(false), CCLogDiagnostics(false), + CCGenDiagnostics(false), CCCGenericGCCName(""), CheckInputsExist(true), + CCCUsePCH(true), SuppressMissingInputWarning(false) { + + Name = llvm::sys::path::filename(ClangExecutable); Dir = llvm::sys::path::parent_path(ClangExecutable); // Compute the path to the resource directory. 
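The switch from llvm::sys::path::stem() to llvm::sys::path::filename() above
matters for versioned driver names, since stem() treats a trailing ".N" as a
file extension. A minimal illustration (example value assumed):

  #include "llvm/Support/Path.h"

  llvm::StringRef Exe = "/usr/bin/clang-3.7";
  llvm::StringRef Name = llvm::sys::path::filename(Exe); // "clang-3.7"
  llvm::StringRef Stem = llvm::sys::path::stem(Exe);     // "clang-3"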
@@ -364,6 +363,13 @@ Compilation *Driver::BuildCompilation(ArrayRef<const char *> ArgList) { if (const Arg *A = Args->getLastArg(options::OPT_resource_dir)) ResourceDir = A->getValue(); + if (const Arg *A = Args->getLastArg(options::OPT_save_temps_EQ)) { + SaveTemps = llvm::StringSwitch<SaveTempsMode>(A->getValue()) + .Case("cwd", SaveTempsCwd) + .Case("obj", SaveTempsObj) + .Default(SaveTempsCwd); + } + // Perform the default argument translations. DerivedArgList *TranslatedArgs = TranslateInputArgs(*Args); @@ -504,7 +510,7 @@ void Driver::generateCompilationDiagnostics(Compilation &C, // If any of the preprocessing commands failed, clean up and exit. if (!FailingCommands.empty()) { - if (!C.getArgs().hasArg(options::OPT_save_temps)) + if (!isSaveTempsEnabled()) C.CleanupFileList(C.getTempFiles(), true); Diag(clang::diag::note_drv_command_failed_diag_msg) @@ -545,6 +551,9 @@ void Driver::generateCompilationDiagnostics(Compilation &C, Diag(clang::diag::note_drv_command_failed_diag_msg) << "Error generating run script: " + Script + " " + EC.message(); } else { + ScriptOS << "# Crash reproducer for " << getClangFullVersion() << "\n" + << "# Original command: "; + Cmd.Print(ScriptOS, "\n", /*Quote=*/true); Cmd.Print(ScriptOS, "\n", /*Quote=*/true, &CrashInfo); Diag(clang::diag::note_drv_command_failed_diag_msg) << Script; } @@ -612,7 +621,7 @@ int Driver::ExecuteCompilation(Compilation &C, const Command *FailingCommand = it->second; // Remove result files if we're not saving temps. - if (!C.getArgs().hasArg(options::OPT_save_temps)) { + if (!isSaveTempsEnabled()) { const JobAction *JA = cast<JobAction>(&FailingCommand->getSource()); C.CleanupFileMap(C.getResultFiles(), JA, true); @@ -970,7 +979,7 @@ static bool DiagnoseInputExistence(const Driver &D, const DerivedArgList &Args, SmallString<64> Path(Value); if (Arg *WorkDir = Args.getLastArg(options::OPT_working_directory)) { - if (!llvm::sys::path::is_absolute(Path.str())) { + if (!llvm::sys::path::is_absolute(Path)) { SmallString<64> Directory(WorkDir->getValue()); llvm::sys::path::append(Directory, Value); Path.assign(Directory); @@ -980,10 +989,11 @@ static bool DiagnoseInputExistence(const Driver &D, const DerivedArgList &Args, if (llvm::sys::fs::exists(Twine(Path))) return true; - if (D.IsCLMode() && llvm::sys::Process::FindInEnvPath("LIB", Value)) + if (D.IsCLMode() && !llvm::sys::path::is_absolute(Twine(Path)) && + llvm::sys::Process::FindInEnvPath("LIB", Value)) return true; - D.Diag(clang::diag::err_drv_no_such_file) << Path.str(); + D.Diag(clang::diag::err_drv_no_such_file) << Path; return false; } @@ -1264,7 +1274,7 @@ void Driver::BuildActions(const ToolChain &TC, DerivedArgList &Args, continue; // Otherwise construct the appropriate action. - Current = ConstructPhaseAction(Args, Phase, std::move(Current)); + Current = ConstructPhaseAction(TC, Args, Phase, std::move(Current)); if (Current->getType() == types::TY_Nothing) break; } @@ -1290,7 +1300,8 @@ void Driver::BuildActions(const ToolChain &TC, DerivedArgList &Args, } std::unique_ptr<Action> -Driver::ConstructPhaseAction(const ArgList &Args, phases::ID Phase, +Driver::ConstructPhaseAction(const ToolChain &TC, const ArgList &Args, + phases::ID Phase, std::unique_ptr<Action> Input) const { llvm::PrettyStackTraceString CrashInfo("Constructing phase actions"); // Build the appropriate action. 
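The -save-temps= handling added in the BuildCompilation() hunk above follows
the usual llvm::StringSwitch idiom; as a self-contained sketch (enumerator
names mirror the patch):

  #include "llvm/ADT/StringSwitch.h"

  enum SaveTempsMode { SaveTempsNone, SaveTempsCwd, SaveTempsObj };

  static SaveTempsMode parseSaveTemps(llvm::StringRef V) {
    // Unrecognized values fall back to "cwd", matching the .Default above.
    return llvm::StringSwitch<SaveTempsMode>(V)
        .Case("cwd", SaveTempsCwd)
        .Case("obj", SaveTempsObj)
        .Default(SaveTempsCwd);
  }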
@@ -1349,7 +1360,7 @@ Driver::ConstructPhaseAction(const ArgList &Args, phases::ID Phase, types::TY_LLVM_BC); } case phases::Backend: { - if (IsUsingLTO(Args)) { + if (IsUsingLTO(TC, Args)) { types::ID Output = Args.hasArg(options::OPT_S) ? types::TY_LTO_IR : types::TY_LTO_BC; return llvm::make_unique<BackendJobAction>(std::move(Input), Output); @@ -1370,7 +1381,10 @@ Driver::ConstructPhaseAction(const ArgList &Args, phases::ID Phase, llvm_unreachable("invalid phase in ConstructPhaseAction"); } -bool Driver::IsUsingLTO(const ArgList &Args) const { +bool Driver::IsUsingLTO(const ToolChain &TC, const ArgList &Args) const { + if (TC.getSanitizerArgs().needsLTO()) + return true; + if (Args.hasFlag(options::OPT_flto, options::OPT_fno_lto, false)) return true; @@ -1471,8 +1485,8 @@ void Driver::BuildJobs(Compilation &C) const { } } -static const Tool *SelectToolForJob(Compilation &C, const ToolChain *TC, - const JobAction *JA, +static const Tool *SelectToolForJob(Compilation &C, bool SaveTemps, + const ToolChain *TC, const JobAction *JA, const ActionList *&Inputs) { const Tool *ToolForJob = nullptr; @@ -1481,7 +1495,7 @@ static const Tool *SelectToolForJob(Compilation &C, const ToolChain *TC, // compiler input. if (TC->useIntegratedAs() && - !C.getArgs().hasArg(options::OPT_save_temps) && + !SaveTemps && !C.getArgs().hasArg(options::OPT_via_file_asm) && !C.getArgs().hasArg(options::OPT__SLASH_FA) && !C.getArgs().hasArg(options::OPT__SLASH_Fa) && @@ -1512,8 +1526,7 @@ static const Tool *SelectToolForJob(Compilation &C, const ToolChain *TC, const Tool *Compiler = TC->SelectTool(*CompileJA); if (!Compiler) return nullptr; - if (!Compiler->canEmitIR() || - !C.getArgs().hasArg(options::OPT_save_temps)) { + if (!Compiler->canEmitIR() || !SaveTemps) { Inputs = &(*Inputs)[0]->getInputs(); ToolForJob = Compiler; } @@ -1529,7 +1542,7 @@ static const Tool *SelectToolForJob(Compilation &C, const ToolChain *TC, if (Inputs->size() == 1 && isa<PreprocessJobAction>(*Inputs->begin()) && !C.getArgs().hasArg(options::OPT_no_integrated_cpp) && !C.getArgs().hasArg(options::OPT_traditional_cpp) && - !C.getArgs().hasArg(options::OPT_save_temps) && + !SaveTemps && !C.getArgs().hasArg(options::OPT_rewrite_objc) && ToolForJob->hasIntegratedCPP()) Inputs = &(*Inputs)[0]->getInputs(); @@ -1577,7 +1590,7 @@ void Driver::BuildJobsForAction(Compilation &C, const ActionList *Inputs = &A->getInputs(); const JobAction *JA = cast<JobAction>(A); - const Tool *T = SelectToolForJob(C, TC, JA, Inputs); + const Tool *T = SelectToolForJob(C, isSaveTempsEnabled(), TC, JA, Inputs); if (!T) return; @@ -1708,7 +1721,7 @@ const char *Driver::GetNamedOutputPath(Compilation &C, } // Output to a temporary file? 
- if ((!AtTopLevel && !C.getArgs().hasArg(options::OPT_save_temps) && + if ((!AtTopLevel && !isSaveTempsEnabled() && !C.getArgs().hasArg(options::OPT__SLASH_Fo)) || CCGenDiagnostics) { StringRef Name = llvm::sys::path::filename(BaseInput); @@ -1780,11 +1793,20 @@ const char *Driver::GetNamedOutputPath(Compilation &C, NamedOutput = C.getArgs().MakeArgString(Suffixed.c_str()); } + // Prepend object file path if -save-temps=obj + if (!AtTopLevel && isSaveTempsObj() && C.getArgs().hasArg(options::OPT_o) && + JA.getType() != types::TY_PCH) { + Arg *FinalOutput = C.getArgs().getLastArg(options::OPT_o); + SmallString<128> TempPath(FinalOutput->getValue()); + llvm::sys::path::remove_filename(TempPath); + StringRef OutputFileName = llvm::sys::path::filename(NamedOutput); + llvm::sys::path::append(TempPath, OutputFileName); + NamedOutput = C.getArgs().MakeArgString(TempPath.c_str()); + } + // If we're saving temps and the temp file conflicts with the input file, // then avoid overwriting input file. - if (!AtTopLevel && C.getArgs().hasArg(options::OPT_save_temps) && - NamedOutput == BaseName) { - + if (!AtTopLevel && isSaveTempsEnabled() && NamedOutput == BaseName) { bool SameFile = false; SmallString<256> Result; llvm::sys::fs::current_path(Result); @@ -2003,12 +2025,15 @@ static llvm::Triple computeTargetTriple(StringRef DefaultTargetTriple, const ToolChain &Driver::getToolChain(const ArgList &Args, StringRef DarwinArchName) const { - llvm::Triple Target = computeTargetTriple(DefaultTargetTriple, Args, - DarwinArchName); + llvm::Triple Target = + computeTargetTriple(DefaultTargetTriple, Args, DarwinArchName); ToolChain *&TC = ToolChains[Target.str()]; if (!TC) { switch (Target.getOS()) { + case llvm::Triple::CloudABI: + TC = new toolchains::CloudABI(*this, Target, Args); + break; case llvm::Triple::Darwin: case llvm::Triple::MacOSX: case llvm::Triple::IOS: @@ -2038,6 +2063,9 @@ const ToolChain &Driver::getToolChain(const ArgList &Args, else TC = new toolchains::Linux(*this, Target, Args); break; + case llvm::Triple::NaCl: + TC = new toolchains::NaCl_TC(*this, Target, Args); + break; case llvm::Triple::Solaris: TC = new toolchains::Solaris(*this, Target, Args); break; @@ -2069,29 +2097,20 @@ const ToolChain &Driver::getToolChain(const ArgList &Args, } break; default: - // TCE is an OSless target - if (Target.getArchName() == "tce") { + // Of these targets, Hexagon is the only one that might have + // an OS of Linux, in which case it got handled above already. 
+ if (Target.getArchName() == "tce") TC = new toolchains::TCEToolChain(*this, Target, Args); - break; - } - // If Hexagon is configured as an OSless target - if (Target.getArch() == llvm::Triple::hexagon) { + else if (Target.getArch() == llvm::Triple::hexagon) TC = new toolchains::Hexagon_TC(*this, Target, Args); - break; - } - if (Target.getArch() == llvm::Triple::xcore) { + else if (Target.getArch() == llvm::Triple::xcore) TC = new toolchains::XCore(*this, Target, Args); - break; - } - if (Target.isOSBinFormatELF()) { + else if (Target.isOSBinFormatELF()) TC = new toolchains::Generic_ELF(*this, Target, Args); - break; - } - if (Target.isOSBinFormatMachO()) { + else if (Target.isOSBinFormatMachO()) TC = new toolchains::MachO(*this, Target, Args); - break; - } - TC = new toolchains::Generic_GCC(*this, Target, Args); + else + TC = new toolchains::Generic_GCC(*this, Target, Args); break; } } @@ -2125,7 +2144,7 @@ bool Driver::GetReleaseVersion(const char *Str, unsigned &Major, Major = Minor = Micro = 0; if (*Str == '\0') - return true; + return false; char *End; Major = (unsigned) strtol(Str, &End, 10); diff --git a/contrib/llvm/tools/clang/lib/Driver/Job.cpp b/contrib/llvm/tools/clang/lib/Driver/Job.cpp index c5b3f5a..6d18a41 100644 --- a/contrib/llvm/tools/clang/lib/Driver/Job.cpp +++ b/contrib/llvm/tools/clang/lib/Driver/Job.cpp @@ -34,7 +34,7 @@ Command::Command(const Action &_Source, const Tool &_Creator, Executable(_Executable), Arguments(_Arguments), ResponseFile(nullptr) {} -static int skipArgs(const char *Flag) { +static int skipArgs(const char *Flag, bool HaveCrashVFS) { // These flags are all of the form -Flag <Arg> and are treated as two // arguments. Therefore, we need to skip the flag and the next argument. bool Res = llvm::StringSwitch<bool>(Flag) @@ -43,9 +43,11 @@ static int skipArgs(const char *Flag) { .Cases("-fdebug-compilation-dir", "-idirafter", true) .Cases("-include", "-include-pch", "-internal-isystem", true) .Cases("-internal-externc-isystem", "-iprefix", "-iwithprefix", true) - .Cases("-iwithprefixbefore", "-isysroot", "-isystem", "-iquote", true) + .Cases("-iwithprefixbefore", "-isystem", "-iquote", true) .Cases("-resource-dir", "-serialize-diagnostic-file", true) .Cases("-dwarf-debug-flags", "-ivfsoverlay", true) + // Some include flags shouldn't be skipped if we have a crash VFS + .Case("-isysroot", !HaveCrashVFS) .Default(false); // Match found. 
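skipArgs() above now keys a flag's skippability on whether a crash VFS was
captured, so that -isysroot survives into the reproducer command line when an
overlay is available to resolve headers through. A condensed sketch of the
pattern (flag table abbreviated):

  #include "llvm/ADT/StringSwitch.h"

  // Returns 2 to drop a "-Flag <value>" pair from the reproducer command
  // line, 0 to keep it (the real table lists many more flags).
  static int skipArgs(const char *Flag, bool HaveCrashVFS) {
    bool Res = llvm::StringSwitch<bool>(Flag)
                   .Cases("-include", "-idirafter", true)
                   // Keep -isysroot when a crash VFS exists.
                   .Case("-isysroot", !HaveCrashVFS)
                   .Default(false);
    return Res ? 2 : 0;
  }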
@@ -164,11 +166,12 @@ void Command::Print(raw_ostream &OS, const char *Terminator, bool Quote, if (StringRef(Args[I]).equals("-main-file-name")) MainFilename = Args[I + 1]; + bool HaveCrashVFS = CrashInfo && !CrashInfo->VFSPath.empty(); for (size_t i = 0, e = Args.size(); i < e; ++i) { const char *const Arg = Args[i]; if (CrashInfo) { - if (int Skip = skipArgs(Arg)) { + if (int Skip = skipArgs(Arg, HaveCrashVFS)) { i += Skip - 1; continue; } else if (llvm::sys::path::filename(Arg) == MainFilename && @@ -185,7 +188,7 @@ void Command::Print(raw_ostream &OS, const char *Terminator, bool Quote, PrintArg(OS, Arg, Quote); } - if (CrashInfo && !CrashInfo->VFSPath.empty()) { + if (CrashInfo && HaveCrashVFS) { OS << ' '; PrintArg(OS, "-ivfsoverlay", Quote); OS << ' '; @@ -217,8 +220,7 @@ int Command::Execute(const StringRef **Redirects, std::string *ErrMsg, if (ResponseFile == nullptr) { Argv.push_back(Executable); - for (size_t i = 0, e = Arguments.size(); i != e; ++i) - Argv.push_back(Arguments[i]); + Argv.append(Arguments.begin(), Arguments.end()); Argv.push_back(nullptr); return llvm::sys::ExecuteAndWait(Executable, Argv.data(), /*env*/ nullptr, diff --git a/contrib/llvm/tools/clang/lib/Driver/MSVCToolChain.cpp b/contrib/llvm/tools/clang/lib/Driver/MSVCToolChain.cpp index d6bd5c3..7739cb0 100644 --- a/contrib/llvm/tools/clang/lib/Driver/MSVCToolChain.cpp +++ b/contrib/llvm/tools/clang/lib/Driver/MSVCToolChain.cpp @@ -21,6 +21,7 @@ #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/FileSystem.h" #include "llvm/Support/Process.h" +#include <cstdio> // Include the necessary headers to interface with the Windows registry and // environment. @@ -212,7 +213,7 @@ bool MSVCToolChain::getWindowsSDKDir(std::string &path, int &major, "SOFTWARE\\Microsoft\\Microsoft SDKs\\Windows\\$VERSION", "InstallationFolder", path, &sdkVersion); if (!sdkVersion.empty()) - ::sscanf(sdkVersion.c_str(), "v%d.%d", &major, &minor); + std::sscanf(sdkVersion.c_str(), "v%d.%d", &major, &minor); return hasSDKDir && !path.empty(); } @@ -423,7 +424,7 @@ void MSVCToolChain::AddSystemIncludeWithSubfolder(const ArgList &DriverArgs, const char *subfolder) const { llvm::SmallString<128> path(folder); llvm::sys::path::append(path, subfolder); - addSystemInclude(DriverArgs, CC1Args, path.str()); + addSystemInclude(DriverArgs, CC1Args, path); } void MSVCToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs, @@ -434,7 +435,7 @@ void MSVCToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs, if (!DriverArgs.hasArg(options::OPT_nobuiltininc)) { SmallString<128> P(getDriver().ResourceDir); llvm::sys::path::append(P, "include"); - addSystemInclude(DriverArgs, CC1Args, P.str()); + addSystemInclude(DriverArgs, CC1Args, P); } if (DriverArgs.hasArg(options::OPT_nostdlibinc)) diff --git a/contrib/llvm/tools/clang/lib/Driver/Multilib.cpp b/contrib/llvm/tools/clang/lib/Driver/Multilib.cpp index 1f5d62f..8acda67 100644 --- a/contrib/llvm/tools/clang/lib/Driver/Multilib.cpp +++ b/contrib/llvm/tools/clang/lib/Driver/Multilib.cpp @@ -151,41 +151,23 @@ MultilibSet &MultilibSet::Maybe(const Multilib &M) { } MultilibSet &MultilibSet::Either(const Multilib &M1, const Multilib &M2) { - std::vector<Multilib> Ms; - Ms.push_back(M1); - Ms.push_back(M2); - return Either(Ms); + return Either({M1, M2}); } MultilibSet &MultilibSet::Either(const Multilib &M1, const Multilib &M2, const Multilib &M3) { - std::vector<Multilib> Ms; - Ms.push_back(M1); - Ms.push_back(M2); - Ms.push_back(M3); - return Either(Ms); + return Either({M1, M2, 
M3}); } MultilibSet &MultilibSet::Either(const Multilib &M1, const Multilib &M2, const Multilib &M3, const Multilib &M4) { - std::vector<Multilib> Ms; - Ms.push_back(M1); - Ms.push_back(M2); - Ms.push_back(M3); - Ms.push_back(M4); - return Either(Ms); + return Either({M1, M2, M3, M4}); } MultilibSet &MultilibSet::Either(const Multilib &M1, const Multilib &M2, const Multilib &M3, const Multilib &M4, const Multilib &M5) { - std::vector<Multilib> Ms; - Ms.push_back(M1); - Ms.push_back(M2); - Ms.push_back(M3); - Ms.push_back(M4); - Ms.push_back(M5); - return Either(Ms); + return Either({M1, M2, M3, M4, M5}); } static Multilib compose(const Multilib &Base, const Multilib &New) { @@ -197,7 +179,7 @@ static Multilib compose(const Multilib &Base, const Multilib &New) { llvm::sys::path::append(IncludeSuffix, "/", Base.includeSuffix(), New.includeSuffix()); - Multilib Composed(GCCSuffix.str(), OSSuffix.str(), IncludeSuffix.str()); + Multilib Composed(GCCSuffix, OSSuffix, IncludeSuffix); Multilib::flags_list &Flags = Composed.flags(); @@ -207,8 +189,7 @@ static Multilib compose(const Multilib &Base, const Multilib &New) { return Composed; } -MultilibSet & -MultilibSet::Either(const std::vector<Multilib> &MultilibSegments) { +MultilibSet &MultilibSet::Either(ArrayRef<Multilib> MultilibSegments) { multilib_list Composed; if (Multilibs.empty()) @@ -229,30 +210,23 @@ MultilibSet::Either(const std::vector<Multilib> &MultilibSegments) { return *this; } -MultilibSet &MultilibSet::FilterOut(const MultilibSet::FilterCallback &F) { +MultilibSet &MultilibSet::FilterOut(FilterCallback F) { filterInPlace(F, Multilibs); return *this; } -MultilibSet &MultilibSet::FilterOut(std::string Regex) { - class REFilter : public MultilibSet::FilterCallback { - mutable llvm::Regex R; - - public: - REFilter(std::string Regex) : R(Regex) {} - bool operator()(const Multilib &M) const override { - std::string Error; - if (!R.isValid(Error)) { - llvm::errs() << Error; - assert(false); - return false; - } - return R.match(M.gccSuffix()); - } - }; +MultilibSet &MultilibSet::FilterOut(const char *Regex) { + llvm::Regex R(Regex); +#ifndef NDEBUG + std::string Error; + if (!R.isValid(Error)) { + llvm::errs() << Error; + llvm_unreachable("Invalid regex!"); + } +#endif - REFilter REF(Regex); - filterInPlace(REF, Multilibs); + filterInPlace([&R](const Multilib &M) { return R.match(M.gccSuffix()); }, + Multilibs); return *this; } @@ -262,38 +236,29 @@ void MultilibSet::combineWith(const MultilibSet &Other) { Multilibs.insert(Multilibs.end(), Other.begin(), Other.end()); } +static bool isFlagEnabled(StringRef Flag) { + char Indicator = Flag.front(); + assert(Indicator == '+' || Indicator == '-'); + return Indicator == '+'; +} + bool MultilibSet::select(const Multilib::flags_list &Flags, Multilib &M) const { - class FilterFlagsMismatch : public MultilibSet::FilterCallback { - llvm::StringMap<bool> FlagSet; - - public: - FilterFlagsMismatch(const std::vector<std::string> &Flags) { - // Stuff all of the flags into the FlagSet such that a true mappend - // indicates the flag was enabled, and a false mappend indicates the - // flag was disabled - for (StringRef Flag : Flags) - FlagSet[Flag.substr(1)] = isFlagEnabled(Flag); - } - bool operator()(const Multilib &M) const override { - for (StringRef Flag : M.flags()) { - llvm::StringMap<bool>::const_iterator SI = FlagSet.find(Flag.substr(1)); - if (SI != FlagSet.end()) - if (SI->getValue() != isFlagEnabled(Flag)) - return true; - } - return false; - } - private: - bool isFlagEnabled(StringRef Flag) 
const { - char Indicator = Flag.front(); - assert(Indicator == '+' || Indicator == '-'); - return Indicator == '+'; + llvm::StringMap<bool> FlagSet; + + // Stuff all of the flags into the FlagSet such that a true mappend indicates + // the flag was enabled, and a false mappend indicates the flag was disabled. + for (StringRef Flag : Flags) + FlagSet[Flag.substr(1)] = isFlagEnabled(Flag); + + multilib_list Filtered = filterCopy([&FlagSet](const Multilib &M) { + for (StringRef Flag : M.flags()) { + llvm::StringMap<bool>::const_iterator SI = FlagSet.find(Flag.substr(1)); + if (SI != FlagSet.end()) + if (SI->getValue() != isFlagEnabled(Flag)) + return true; } - }; - - FilterFlagsMismatch FlagsMismatch(Flags); - - multilib_list Filtered = filterCopy(FlagsMismatch, Multilibs); + return false; + }, Multilibs); if (Filtered.size() == 0) { return false; @@ -313,19 +278,15 @@ void MultilibSet::print(raw_ostream &OS) const { OS << M << "\n"; } -MultilibSet::multilib_list -MultilibSet::filterCopy(const MultilibSet::FilterCallback &F, - const multilib_list &Ms) { +MultilibSet::multilib_list MultilibSet::filterCopy(FilterCallback F, + const multilib_list &Ms) { multilib_list Copy(Ms); filterInPlace(F, Copy); return Copy; } -void MultilibSet::filterInPlace(const MultilibSet::FilterCallback &F, - multilib_list &Ms) { - Ms.erase(std::remove_if(Ms.begin(), Ms.end(), - [&F](const Multilib &M) { return F(M); }), - Ms.end()); +void MultilibSet::filterInPlace(FilterCallback F, multilib_list &Ms) { + Ms.erase(std::remove_if(Ms.begin(), Ms.end(), F), Ms.end()); } raw_ostream &clang::driver::operator<<(raw_ostream &OS, const MultilibSet &MS) { diff --git a/contrib/llvm/tools/clang/lib/Driver/SanitizerArgs.cpp b/contrib/llvm/tools/clang/lib/Driver/SanitizerArgs.cpp index bd7bc21..72530b4 100644 --- a/contrib/llvm/tools/clang/lib/Driver/SanitizerArgs.cpp +++ b/contrib/llvm/tools/clang/lib/Driver/SanitizerArgs.cpp @@ -7,6 +7,7 @@ // //===----------------------------------------------------------------------===// #include "clang/Driver/SanitizerArgs.h" +#include "clang/Basic/Sanitizers.h" #include "clang/Driver/Driver.h" #include "clang/Driver/DriverDiagnostic.h" #include "clang/Driver/Options.h" @@ -18,74 +19,41 @@ #include "llvm/Support/SpecialCaseList.h" #include <memory> +using namespace clang; +using namespace clang::SanitizerKind; using namespace clang::driver; using namespace llvm::opt; -namespace { -/// Assign ordinals to possible values of -fsanitize= flag. -/// We use the ordinal values as bit positions within \c SanitizeKind. -enum SanitizeOrdinal { -#define SANITIZER(NAME, ID) SO_##ID, -#define SANITIZER_GROUP(NAME, ID, ALIAS) SO_##ID##Group, -#include "clang/Basic/Sanitizers.def" - SO_Count -}; - -/// Represents a set of sanitizer kinds. It is also used to define: -/// 1) set of sanitizers each sanitizer group expands into. -/// 2) set of sanitizers sharing a specific property (e.g. -/// all sanitizers with zero-base shadow). 
-enum SanitizeKind { -#define SANITIZER(NAME, ID) ID = 1 << SO_##ID, -#define SANITIZER_GROUP(NAME, ID, ALIAS) \ -ID = ALIAS, ID##Group = 1 << SO_##ID##Group, -#include "clang/Basic/Sanitizers.def" +enum : SanitizerMask { NeedsUbsanRt = Undefined | Integer, NotAllowedWithTrap = Vptr, RequiresPIE = Memory | DataFlow, NeedsUnwindTables = Address | Thread | Memory | DataFlow, - SupportsCoverage = Address | Memory | Leak | Undefined | Integer, + SupportsCoverage = Address | Memory | Leak | Undefined | Integer | DataFlow, RecoverableByDefault = Undefined | Integer, Unrecoverable = Address | Unreachable | Return, - LegacyFsanitizeRecoverMask = Undefined | Integer + LegacyFsanitizeRecoverMask = Undefined | Integer, + NeedsLTO = CFI, }; -} - -/// Returns true if set of \p Sanitizers contain at least one sanitizer from -/// \p Kinds. -static bool hasOneOf(const clang::SanitizerSet &Sanitizers, unsigned Kinds) { -#define SANITIZER(NAME, ID) \ - if (Sanitizers.has(clang::SanitizerKind::ID) && (Kinds & ID)) \ - return true; -#include "clang/Basic/Sanitizers.def" - return false; -} - -/// Adds all sanitizers from \p Kinds to \p Sanitizers. -static void addAllOf(clang::SanitizerSet &Sanitizers, unsigned Kinds) { -#define SANITIZER(NAME, ID) \ - if (Kinds & ID) \ - Sanitizers.set(clang::SanitizerKind::ID, true); -#include "clang/Basic/Sanitizers.def" -} - -static unsigned toSanitizeKind(clang::SanitizerKind K) { -#define SANITIZER(NAME, ID) \ - if (K == clang::SanitizerKind::ID) \ - return ID; -#include "clang/Basic/Sanitizers.def" - llvm_unreachable("Invalid SanitizerKind!"); -} -/// Parse a single value from a -fsanitize= or -fno-sanitize= value list. -/// Returns a member of the \c SanitizeKind enumeration, or \c 0 -/// if \p Value is not known. -static unsigned parseValue(const char *Value); +enum CoverageFeature { + CoverageFunc = 1 << 0, + CoverageBB = 1 << 1, + CoverageEdge = 1 << 2, + CoverageIndirCall = 1 << 3, + CoverageTraceBB = 1 << 4, + CoverageTraceCmp = 1 << 5, + Coverage8bitCounters = 1 << 6, +}; /// Parse a -fsanitize= or -fno-sanitize= argument's values, diagnosing any -/// invalid components. Returns OR of members of \c SanitizeKind enumeration. -static unsigned parseArgValues(const Driver &D, const llvm::opt::Arg *A, - bool DiagnoseErrors); +/// invalid components. Returns a SanitizerMask. +static SanitizerMask parseArgValues(const Driver &D, const llvm::opt::Arg *A, + bool DiagnoseErrors); + +/// Parse -f(no-)?sanitize-coverage= flag values, diagnosing any invalid +/// components. Returns OR of members of \c CoverageFeature enumeration. +static int parseCoverageFeatures(const Driver &D, const llvm::opt::Arg *A); /// Produce an argument string from ArgList \p Args, which shows how it /// provides some sanitizer kind from \p Mask. For example, the argument list @@ -93,39 +61,32 @@ static unsigned parseArgValues(const Driver &D, const llvm::opt::Arg *A, /// would produce "-fsanitize=vptr". static std::string lastArgumentForMask(const Driver &D, const llvm::opt::ArgList &Args, - unsigned Mask); - -static std::string lastArgumentForKind(const Driver &D, - const llvm::opt::ArgList &Args, - clang::SanitizerKind K) { - return lastArgumentForMask(D, Args, toSanitizeKind(K)); -} + SanitizerMask Mask); /// Produce an argument string from argument \p A, which shows how it provides /// a value in \p Mask. For instance, the argument /// "-fsanitize=address,alignment" with mask \c NeedsUbsanRt would produce /// "-fsanitize=alignment". 
-static std::string describeSanitizeArg(const llvm::opt::Arg *A, unsigned Mask); +static std::string describeSanitizeArg(const llvm::opt::Arg *A, + SanitizerMask Mask); /// Produce a string containing comma-separated names of sanitizers in \p /// Sanitizers set. static std::string toString(const clang::SanitizerSet &Sanitizers); -/// For each sanitizer group bit set in \p Kinds, set the bits for sanitizers -/// this group enables. -static unsigned expandGroups(unsigned Kinds); - -static unsigned getToolchainUnsupportedKinds(const ToolChain &TC) { +static SanitizerMask getToolchainUnsupportedKinds(const ToolChain &TC) { bool IsFreeBSD = TC.getTriple().getOS() == llvm::Triple::FreeBSD; bool IsLinux = TC.getTriple().getOS() == llvm::Triple::Linux; bool IsX86 = TC.getTriple().getArch() == llvm::Triple::x86; bool IsX86_64 = TC.getTriple().getArch() == llvm::Triple::x86_64; + bool IsMIPS64 = TC.getTriple().getArch() == llvm::Triple::mips64 || + TC.getTriple().getArch() == llvm::Triple::mips64el; - unsigned Unsupported = 0; - if (!(IsLinux && IsX86_64)) { + SanitizerMask Unsupported = 0; + if (!(IsLinux && (IsX86_64 || IsMIPS64))) { Unsupported |= Memory | DataFlow; } - if (!((IsLinux || IsFreeBSD) && IsX86_64)) { + if (!((IsLinux || IsFreeBSD) && (IsX86_64 || IsMIPS64))) { Unsupported |= Thread; } if (!(IsLinux && (IsX86 || IsX86_64))) { @@ -134,23 +95,51 @@ static unsigned getToolchainUnsupportedKinds(const ToolChain &TC) { return Unsupported; } +static bool getDefaultBlacklist(const Driver &D, SanitizerMask Kinds, + std::string &BLPath) { + const char *BlacklistFile = nullptr; + if (Kinds & Address) + BlacklistFile = "asan_blacklist.txt"; + else if (Kinds & Memory) + BlacklistFile = "msan_blacklist.txt"; + else if (Kinds & Thread) + BlacklistFile = "tsan_blacklist.txt"; + else if (Kinds & DataFlow) + BlacklistFile = "dfsan_abilist.txt"; + + if (BlacklistFile) { + clang::SmallString<64> Path(D.ResourceDir); + llvm::sys::path::append(Path, BlacklistFile); + BLPath = Path.str(); + return true; + } + return false; +} + bool SanitizerArgs::needsUbsanRt() const { - return !UbsanTrapOnError && hasOneOf(Sanitizers, NeedsUbsanRt); + return !UbsanTrapOnError && (Sanitizers.Mask & NeedsUbsanRt) && + !Sanitizers.has(Address) && + !Sanitizers.has(Memory) && + !Sanitizers.has(Thread); } bool SanitizerArgs::requiresPIE() const { - return AsanZeroBaseShadow || hasOneOf(Sanitizers, RequiresPIE); + return AsanZeroBaseShadow || (Sanitizers.Mask & RequiresPIE); } bool SanitizerArgs::needsUnwindTables() const { - return hasOneOf(Sanitizers, NeedsUnwindTables); + return Sanitizers.Mask & NeedsUnwindTables; +} + +bool SanitizerArgs::needsLTO() const { + return Sanitizers.Mask & NeedsLTO; } void SanitizerArgs::clear() { Sanitizers.clear(); RecoverableSanitizers.clear(); - BlacklistFile = ""; - SanitizeCoverage = 0; + BlacklistFiles.clear(); + CoverageFeatures = 0; MsanTrackOrigins = 0; AsanFieldPadding = 0; AsanZeroBaseShadow = false; @@ -162,27 +151,36 @@ void SanitizerArgs::clear() { SanitizerArgs::SanitizerArgs(const ToolChain &TC, const llvm::opt::ArgList &Args) { clear(); - unsigned AllRemove = 0; // During the loop below, the accumulated set of - // sanitizers disabled by the current sanitizer - // argument or any argument after it. - unsigned DiagnosedKinds = 0; // All Kinds we have diagnosed up to now. - // Used to deduplicate diagnostics. 
- unsigned Kinds = 0; - unsigned NotSupported = getToolchainUnsupportedKinds(TC); + SanitizerMask AllRemove = 0; // During the loop below, the accumulated set of + // sanitizers disabled by the current sanitizer + // argument or any argument after it. + SanitizerMask AllAddedKinds = 0; // Mask of all sanitizers ever enabled by + // -fsanitize= flags (directly or via group + // expansion), some of which may be disabled + // later. Used to carefully prune + // unused-argument diagnostics. + SanitizerMask DiagnosedKinds = 0; // All Kinds we have diagnosed up to now. + // Used to deduplicate diagnostics. + SanitizerMask Kinds = 0; + SanitizerMask NotSupported = getToolchainUnsupportedKinds(TC); + ToolChain::RTTIMode RTTIMode = TC.getRTTIMode(); + const Driver &D = TC.getDriver(); for (ArgList::const_reverse_iterator I = Args.rbegin(), E = Args.rend(); I != E; ++I) { const auto *Arg = *I; if (Arg->getOption().matches(options::OPT_fsanitize_EQ)) { Arg->claim(); - unsigned Add = parseArgValues(D, Arg, true); + SanitizerMask Add = parseArgValues(D, Arg, true); + AllAddedKinds |= expandSanitizerGroups(Add); // Avoid diagnosing any sanitizer which is disabled later. Add &= ~AllRemove; // At this point we have not expanded groups, so any unsupported // sanitizers in Add are those which have been explicitly enabled. // Diagnose them. - if (unsigned KindsToDiagnose = Add & NotSupported & ~DiagnosedKinds) { + if (SanitizerMask KindsToDiagnose = + Add & NotSupported & ~DiagnosedKinds) { // Only diagnose the new kinds. std::string Desc = describeSanitizeArg(*I, KindsToDiagnose); D.Diag(diag::err_drv_unsupported_opt_for_target) @@ -191,7 +189,29 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC, } Add &= ~NotSupported; - Add = expandGroups(Add); + // Test for -fno-rtti + explicit -fsanitizer=vptr before expanding groups + // so we don't error out if -fno-rtti and -fsanitize=undefined were + // passed. + if (Add & Vptr && + (RTTIMode == ToolChain::RM_DisabledImplicitly || + RTTIMode == ToolChain::RM_DisabledExplicitly)) { + if (RTTIMode == ToolChain::RM_DisabledImplicitly) + // Warn about not having rtti enabled if the vptr sanitizer is + // explicitly enabled + D.Diag(diag::warn_drv_disabling_vptr_no_rtti_default); + else { + const llvm::opt::Arg *NoRTTIArg = TC.getRTTIArg(); + assert(NoRTTIArg && + "RTTI disabled explicitly but we have no argument!"); + D.Diag(diag::err_drv_argument_not_allowed_with) + << "-fsanitize=vptr" << NoRTTIArg->getAsString(Args); + } + + // Take out the Vptr sanitizer from the enabled sanitizers + AllRemove |= Vptr; + } + + Add = expandSanitizerGroups(Add); // Group expansion may have enabled a sanitizer which is disabled later. Add &= ~AllRemove; // Silently discard any unsupported sanitizers implicitly enabled through @@ -201,122 +221,128 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC, Kinds |= Add; } else if (Arg->getOption().matches(options::OPT_fno_sanitize_EQ)) { Arg->claim(); - unsigned Remove = parseArgValues(D, Arg, true); - AllRemove |= expandGroups(Remove); + SanitizerMask Remove = parseArgValues(D, Arg, true); + AllRemove |= expandSanitizerGroups(Remove); + } + } + + // We disable the vptr sanitizer if it was enabled by group expansion but RTTI + // is disabled. + if ((Kinds & Vptr) && + (RTTIMode == ToolChain::RM_DisabledImplicitly || + RTTIMode == ToolChain::RM_DisabledExplicitly)) { + Kinds &= ~Vptr; + } + + // Warn about undefined sanitizer options that require runtime support. 
+ UbsanTrapOnError = + Args.hasFlag(options::OPT_fsanitize_undefined_trap_on_error, + options::OPT_fno_sanitize_undefined_trap_on_error, false); + if (UbsanTrapOnError && (Kinds & NotAllowedWithTrap)) { + D.Diag(clang::diag::err_drv_argument_not_allowed_with) + << lastArgumentForMask(D, Args, NotAllowedWithTrap) + << "-fsanitize-undefined-trap-on-error"; + Kinds &= ~NotAllowedWithTrap; + } + + // Warn about incompatible groups of sanitizers. + std::pair<SanitizerMask, SanitizerMask> IncompatibleGroups[] = { + std::make_pair(Address, Thread), std::make_pair(Address, Memory), + std::make_pair(Thread, Memory), std::make_pair(Leak, Thread), + std::make_pair(Leak, Memory)}; + for (auto G : IncompatibleGroups) { + SanitizerMask Group = G.first; + if (Kinds & Group) { + if (SanitizerMask Incompatible = Kinds & G.second) { + D.Diag(clang::diag::err_drv_argument_not_allowed_with) + << lastArgumentForMask(D, Args, Group) + << lastArgumentForMask(D, Args, Incompatible); + Kinds &= ~Incompatible; + } } } - addAllOf(Sanitizers, Kinds); + // FIXME: Currently -fsanitize=leak is silently ignored in the presence of + // -fsanitize=address. Perhaps it should print an error, or perhaps + // -f(-no)sanitize=leak should change whether leak detection is enabled by + // default in ASan? // Parse -f(no-)?sanitize-recover flags. - unsigned RecoverableKinds = RecoverableByDefault; - unsigned DiagnosedUnrecoverableKinds = 0; + SanitizerMask RecoverableKinds = RecoverableByDefault; + SanitizerMask DiagnosedUnrecoverableKinds = 0; for (const auto *Arg : Args) { + const char *DeprecatedReplacement = nullptr; if (Arg->getOption().matches(options::OPT_fsanitize_recover)) { - // FIXME: Add deprecation notice, and then remove this flag. - RecoverableKinds |= expandGroups(LegacyFsanitizeRecoverMask); + DeprecatedReplacement = "-fsanitize-recover=undefined,integer"; + RecoverableKinds |= expandSanitizerGroups(LegacyFsanitizeRecoverMask); Arg->claim(); } else if (Arg->getOption().matches(options::OPT_fno_sanitize_recover)) { - // FIXME: Add deprecation notice, and then remove this flag. - RecoverableKinds &= ~expandGroups(LegacyFsanitizeRecoverMask); + DeprecatedReplacement = "-fno-sanitize-recover=undefined,integer"; + RecoverableKinds &= ~expandSanitizerGroups(LegacyFsanitizeRecoverMask); Arg->claim(); } else if (Arg->getOption().matches(options::OPT_fsanitize_recover_EQ)) { - unsigned Add = parseArgValues(D, Arg, true); + SanitizerMask Add = parseArgValues(D, Arg, true); // Report error if user explicitly tries to recover from unrecoverable // sanitizer. 
- if (unsigned KindsToDiagnose = + if (SanitizerMask KindsToDiagnose = Add & Unrecoverable & ~DiagnosedUnrecoverableKinds) { SanitizerSet SetToDiagnose; - addAllOf(SetToDiagnose, KindsToDiagnose); + SetToDiagnose.Mask |= KindsToDiagnose; D.Diag(diag::err_drv_unsupported_option_argument) << Arg->getOption().getName() << toString(SetToDiagnose); DiagnosedUnrecoverableKinds |= KindsToDiagnose; } - RecoverableKinds |= expandGroups(Add); + RecoverableKinds |= expandSanitizerGroups(Add); Arg->claim(); } else if (Arg->getOption().matches(options::OPT_fno_sanitize_recover_EQ)) { - RecoverableKinds &= ~expandGroups(parseArgValues(D, Arg, true)); + RecoverableKinds &= ~expandSanitizerGroups(parseArgValues(D, Arg, true)); Arg->claim(); } + if (DeprecatedReplacement) { + D.Diag(diag::warn_drv_deprecated_arg) << Arg->getAsString(Args) + << DeprecatedReplacement; + } } RecoverableKinds &= Kinds; RecoverableKinds &= ~Unrecoverable; - addAllOf(RecoverableSanitizers, RecoverableKinds); - - UbsanTrapOnError = - Args.hasFlag(options::OPT_fsanitize_undefined_trap_on_error, - options::OPT_fno_sanitize_undefined_trap_on_error, false); - // Warn about undefined sanitizer options that require runtime support. - if (UbsanTrapOnError && hasOneOf(Sanitizers, NotAllowedWithTrap)) { - D.Diag(clang::diag::err_drv_argument_not_allowed_with) - << lastArgumentForMask(D, Args, NotAllowedWithTrap) - << "-fsanitize-undefined-trap-on-error"; + // Setup blacklist files. + // Add default blacklist from resource directory. + { + std::string BLPath; + if (getDefaultBlacklist(D, Kinds, BLPath) && llvm::sys::fs::exists(BLPath)) + BlacklistFiles.push_back(BLPath); } - - // Check for incompatible sanitizers. - bool NeedsAsan = Sanitizers.has(SanitizerKind::Address); - bool NeedsTsan = Sanitizers.has(SanitizerKind::Thread); - bool NeedsMsan = Sanitizers.has(SanitizerKind::Memory); - bool NeedsLsan = Sanitizers.has(SanitizerKind::Leak); - if (NeedsAsan && NeedsTsan) - D.Diag(clang::diag::err_drv_argument_not_allowed_with) - << lastArgumentForKind(D, Args, SanitizerKind::Address) - << lastArgumentForKind(D, Args, SanitizerKind::Thread); - if (NeedsAsan && NeedsMsan) - D.Diag(clang::diag::err_drv_argument_not_allowed_with) - << lastArgumentForKind(D, Args, SanitizerKind::Address) - << lastArgumentForKind(D, Args, SanitizerKind::Memory); - if (NeedsTsan && NeedsMsan) - D.Diag(clang::diag::err_drv_argument_not_allowed_with) - << lastArgumentForKind(D, Args, SanitizerKind::Thread) - << lastArgumentForKind(D, Args, SanitizerKind::Memory); - if (NeedsLsan && NeedsTsan) - D.Diag(clang::diag::err_drv_argument_not_allowed_with) - << lastArgumentForKind(D, Args, SanitizerKind::Leak) - << lastArgumentForKind(D, Args, SanitizerKind::Thread); - if (NeedsLsan && NeedsMsan) - D.Diag(clang::diag::err_drv_argument_not_allowed_with) - << lastArgumentForKind(D, Args, SanitizerKind::Leak) - << lastArgumentForKind(D, Args, SanitizerKind::Memory); - // FIXME: Currently -fsanitize=leak is silently ignored in the presence of - // -fsanitize=address. Perhaps it should print an error, or perhaps - // -f(-no)sanitize=leak should change whether leak detection is enabled by - // default in ASan? - // Parse -f(no-)sanitize-blacklist options. - if (Arg *BLArg = Args.getLastArg(options::OPT_fsanitize_blacklist, - options::OPT_fno_sanitize_blacklist)) { - if (BLArg->getOption().matches(options::OPT_fsanitize_blacklist)) { - std::string BLPath = BLArg->getValue(); - if (llvm::sys::fs::exists(BLPath)) { - // Validate the blacklist format. 
- std::string BLError; - std::unique_ptr<llvm::SpecialCaseList> SCL( - llvm::SpecialCaseList::create(BLPath, BLError)); - if (!SCL.get()) - D.Diag(clang::diag::err_drv_malformed_sanitizer_blacklist) << BLError; - else - BlacklistFile = BLPath; - } else { + for (const auto *Arg : Args) { + if (Arg->getOption().matches(options::OPT_fsanitize_blacklist)) { + Arg->claim(); + std::string BLPath = Arg->getValue(); + if (llvm::sys::fs::exists(BLPath)) + BlacklistFiles.push_back(BLPath); + else D.Diag(clang::diag::err_drv_no_such_file) << BLPath; - } + } else if (Arg->getOption().matches(options::OPT_fno_sanitize_blacklist)) { + Arg->claim(); + BlacklistFiles.clear(); } - } else { - // If no -fsanitize-blacklist option is specified, try to look up for - // blacklist in the resource directory. - std::string BLPath; - if (getDefaultBlacklist(D, BLPath) && llvm::sys::fs::exists(BLPath)) - BlacklistFile = BLPath; + } + // Validate blacklists format. + { + std::string BLError; + std::unique_ptr<llvm::SpecialCaseList> SCL( + llvm::SpecialCaseList::create(BlacklistFiles, BLError)); + if (!SCL.get()) + D.Diag(clang::diag::err_drv_malformed_sanitizer_blacklist) << BLError; } // Parse -f[no-]sanitize-memory-track-origins[=level] options. - if (NeedsMsan) { + if (AllAddedKinds & Memory) { if (Arg *A = Args.getLastArg(options::OPT_fsanitize_memory_track_origins_EQ, options::OPT_fsanitize_memory_track_origins, options::OPT_fno_sanitize_memory_track_origins)) { if (A->getOption().matches(options::OPT_fsanitize_memory_track_origins)) { - MsanTrackOrigins = 1; + MsanTrackOrigins = 2; } else if (A->getOption().matches( options::OPT_fno_sanitize_memory_track_origins)) { MsanTrackOrigins = 0; @@ -330,18 +356,72 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC, } } - // Parse -fsanitize-coverage=N. Currently one of asan/msan/lsan is required. - if (hasOneOf(Sanitizers, SupportsCoverage)) { - if (Arg *A = Args.getLastArg(options::OPT_fsanitize_coverage)) { - StringRef S = A->getValue(); - // Legal values are 0..4. - if (S.getAsInteger(0, SanitizeCoverage) || SanitizeCoverage < 0 || - SanitizeCoverage > 4) - D.Diag(clang::diag::err_drv_invalid_value) << A->getAsString(Args) << S; + // Parse -f(no-)?sanitize-coverage flags if coverage is supported by the + // enabled sanitizers. + if (AllAddedKinds & SupportsCoverage) { + for (const auto *Arg : Args) { + if (Arg->getOption().matches(options::OPT_fsanitize_coverage)) { + Arg->claim(); + int LegacySanitizeCoverage; + if (Arg->getNumValues() == 1 && + !StringRef(Arg->getValue(0)) + .getAsInteger(0, LegacySanitizeCoverage) && + LegacySanitizeCoverage >= 0 && LegacySanitizeCoverage <= 4) { + // TODO: Add deprecation notice for this form. + switch (LegacySanitizeCoverage) { + case 0: + CoverageFeatures = 0; + break; + case 1: + CoverageFeatures = CoverageFunc; + break; + case 2: + CoverageFeatures = CoverageBB; + break; + case 3: + CoverageFeatures = CoverageEdge; + break; + case 4: + CoverageFeatures = CoverageEdge | CoverageIndirCall; + break; + } + continue; + } + CoverageFeatures |= parseCoverageFeatures(D, Arg); + } else if (Arg->getOption().matches(options::OPT_fno_sanitize_coverage)) { + Arg->claim(); + CoverageFeatures &= ~parseCoverageFeatures(D, Arg); + } } } - - if (NeedsAsan) { + // Choose at most one coverage type: function, bb, or edge. 
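// For example (sketch): "-fsanitize-coverage=edge,indirect-calls,8bit-counters"
// passes all of the checks below (8bit-counters is carried by the 'edge'
// coverage type), whereas "-fsanitize-coverage=func,bb" trips the first
// err_drv_argument_not_allowed_with diagnostic, and a bare
// "-fsanitize-coverage=trace-bb" trips err_drv_argument_only_allowed_with.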
+ if ((CoverageFeatures & CoverageFunc) && (CoverageFeatures & CoverageBB)) + D.Diag(clang::diag::err_drv_argument_not_allowed_with) + << "-fsanitize-coverage=func" + << "-fsanitize-coverage=bb"; + if ((CoverageFeatures & CoverageFunc) && (CoverageFeatures & CoverageEdge)) + D.Diag(clang::diag::err_drv_argument_not_allowed_with) + << "-fsanitize-coverage=func" + << "-fsanitize-coverage=edge"; + if ((CoverageFeatures & CoverageBB) && (CoverageFeatures & CoverageEdge)) + D.Diag(clang::diag::err_drv_argument_not_allowed_with) + << "-fsanitize-coverage=bb" + << "-fsanitize-coverage=edge"; + // Basic block tracing and 8-bit counters require some type of coverage + // enabled. + int CoverageTypes = CoverageFunc | CoverageBB | CoverageEdge; + if ((CoverageFeatures & CoverageTraceBB) && + !(CoverageFeatures & CoverageTypes)) + D.Diag(clang::diag::err_drv_argument_only_allowed_with) + << "-fsanitize-coverage=trace-bb" + << "-fsanitize-coverage=(func|bb|edge)"; + if ((CoverageFeatures & Coverage8bitCounters) && + !(CoverageFeatures & CoverageTypes)) + D.Diag(clang::diag::err_drv_argument_only_allowed_with) + << "-fsanitize-coverage=8bit-counters" + << "-fsanitize-coverage=(func|bb|edge)"; + + if (AllAddedKinds & Address) { AsanSharedRuntime = Args.hasArg(options::OPT_shared_libasan) || (TC.getTriple().getEnvironment() == llvm::Triple::Android); @@ -367,7 +447,7 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC, case options::OPT__SLASH_LDd: D.Diag(clang::diag::err_drv_argument_not_allowed_with) << WindowsDebugRTArg->getAsString(Args) - << lastArgumentForKind(D, Args, SanitizerKind::Address); + << lastArgumentForMask(D, Args, Address); D.Diag(clang::diag::note_drv_address_sanitizer_debug_runtime); } } @@ -376,12 +456,16 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC, // Parse -link-cxx-sanitizer flag. LinkCXXRuntimes = Args.hasArg(options::OPT_fsanitize_link_cxx_runtime) || D.CCCIsCXX(); + + // Finally, initialize the set of available and recoverable sanitizers. + Sanitizers.Mask |= Kinds; + RecoverableSanitizers.Mask |= RecoverableKinds; } static std::string toString(const clang::SanitizerSet &Sanitizers) { std::string Res; #define SANITIZER(NAME, ID) \ - if (Sanitizers.has(clang::SanitizerKind::ID)) { \ + if (Sanitizers.has(ID)) { \ if (!Res.empty()) \ Res += ","; \ Res += NAME; \ @@ -403,9 +487,9 @@ void SanitizerArgs::addArgs(const llvm::opt::ArgList &Args, if (UbsanTrapOnError) CmdArgs.push_back("-fsanitize-undefined-trap-on-error"); - if (!BlacklistFile.empty()) { + for (const auto &BLPath : BlacklistFiles) { SmallString<64> BlacklistOpt("-fsanitize-blacklist="); - BlacklistOpt += BlacklistFile; + BlacklistOpt += BLPath; CmdArgs.push_back(Args.MakeArgString(BlacklistOpt)); } @@ -415,67 +499,47 @@ void SanitizerArgs::addArgs(const llvm::opt::ArgList &Args, if (AsanFieldPadding) CmdArgs.push_back(Args.MakeArgString("-fsanitize-address-field-padding=" + llvm::utostr(AsanFieldPadding))); - if (SanitizeCoverage) - CmdArgs.push_back(Args.MakeArgString("-fsanitize-coverage=" + - llvm::utostr(SanitizeCoverage))); - // Workaround for PR16386. 
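The diagnostics above enforce two rules: at most one of func/bb/edge may be chosen, and trace-bb or 8bit-counters only make sense once some coverage type is enabled. A compact standalone predicate capturing both rules, with hypothetical bit values again:

#include <cstdio>

enum {
  CoverageFunc = 1 << 0, CoverageBB = 1 << 1, CoverageEdge = 1 << 2,
  CoverageTraceBB = 1 << 3, Coverage8bitCounters = 1 << 4
};

bool isValidCoverage(int F) {
  const int Types = CoverageFunc | CoverageBB | CoverageEdge;
  const int Selected = F & Types;
  if (Selected & (Selected - 1)) // more than one coverage-type bit set
    return false;
  if ((F & (CoverageTraceBB | Coverage8bitCounters)) && !Selected)
    return false;                // tracing/counters need a base type
  return true;
}

int main() {
  std::printf("%d\n", isValidCoverage(CoverageEdge | Coverage8bitCounters)); // 1
  std::printf("%d\n", isValidCoverage(CoverageFunc | CoverageBB));           // 0
}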
- if (Sanitizers.has(SanitizerKind::Memory)) - CmdArgs.push_back(Args.MakeArgString("-fno-assume-sane-operator-new")); -} - -bool SanitizerArgs::getDefaultBlacklist(const Driver &D, std::string &BLPath) { - const char *BlacklistFile = nullptr; - if (Sanitizers.has(SanitizerKind::Address)) - BlacklistFile = "asan_blacklist.txt"; - else if (Sanitizers.has(SanitizerKind::Memory)) - BlacklistFile = "msan_blacklist.txt"; - else if (Sanitizers.has(SanitizerKind::Thread)) - BlacklistFile = "tsan_blacklist.txt"; - else if (Sanitizers.has(SanitizerKind::DataFlow)) - BlacklistFile = "dfsan_abilist.txt"; - - if (BlacklistFile) { - SmallString<64> Path(D.ResourceDir); - llvm::sys::path::append(Path, BlacklistFile); - BLPath = Path.str(); - return true; + // Translate available CoverageFeatures to corresponding clang-cc1 flags. + std::pair<int, const char *> CoverageFlags[] = { + std::make_pair(CoverageFunc, "-fsanitize-coverage-type=1"), + std::make_pair(CoverageBB, "-fsanitize-coverage-type=2"), + std::make_pair(CoverageEdge, "-fsanitize-coverage-type=3"), + std::make_pair(CoverageIndirCall, "-fsanitize-coverage-indirect-calls"), + std::make_pair(CoverageTraceBB, "-fsanitize-coverage-trace-bb"), + std::make_pair(CoverageTraceCmp, "-fsanitize-coverage-trace-cmp"), + std::make_pair(Coverage8bitCounters, "-fsanitize-coverage-8bit-counters")}; + for (auto F : CoverageFlags) { + if (CoverageFeatures & F.first) + CmdArgs.push_back(Args.MakeArgString(F.second)); } - return false; -} -unsigned parseValue(const char *Value) { - unsigned ParsedKind = llvm::StringSwitch<SanitizeKind>(Value) -#define SANITIZER(NAME, ID) .Case(NAME, ID) -#define SANITIZER_GROUP(NAME, ID, ALIAS) .Case(NAME, ID##Group) -#include "clang/Basic/Sanitizers.def" - .Default(SanitizeKind()); - return ParsedKind; -} -unsigned expandGroups(unsigned Kinds) { -#define SANITIZER(NAME, ID) -#define SANITIZER_GROUP(NAME, ID, ALIAS) if (Kinds & ID##Group) Kinds |= ID; -#include "clang/Basic/Sanitizers.def" - return Kinds; + // MSan: Workaround for PR16386. + // ASan: This is mainly to help LSan with cases such as + // https://code.google.com/p/address-sanitizer/issues/detail?id=373 + // We can't make this conditional on -fsanitize=leak, as that flag shouldn't + // affect compilation. + if (Sanitizers.has(Memory) || Sanitizers.has(Address)) + CmdArgs.push_back(Args.MakeArgString("-fno-assume-sane-operator-new")); } -unsigned parseArgValues(const Driver &D, const llvm::opt::Arg *A, - bool DiagnoseErrors) { +SanitizerMask parseArgValues(const Driver &D, const llvm::opt::Arg *A, + bool DiagnoseErrors) { assert((A->getOption().matches(options::OPT_fsanitize_EQ) || A->getOption().matches(options::OPT_fno_sanitize_EQ) || A->getOption().matches(options::OPT_fsanitize_recover_EQ) || A->getOption().matches(options::OPT_fno_sanitize_recover_EQ)) && "Invalid argument in parseArgValues!"); - unsigned Kinds = 0; - for (unsigned I = 0, N = A->getNumValues(); I != N; ++I) { - const char *Value = A->getValue(I); - unsigned Kind; + SanitizerMask Kinds = 0; + for (int i = 0, n = A->getNumValues(); i != n; ++i) { + const char *Value = A->getValue(i); + SanitizerMask Kind; // Special case: don't accept -fsanitize=all. 
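addArgs() above turns the accumulated feature bits back into individual clang-cc1 flags by walking a bit-to-flag table. The same pattern in isolation:

#include <cstdio>
#include <utility>

int main() {
  enum { Func = 1, BB = 2, Edge = 4, IndirCall = 8 };
  const std::pair<int, const char *> CoverageFlags[] = {
      {Func, "-fsanitize-coverage-type=1"},
      {BB, "-fsanitize-coverage-type=2"},
      {Edge, "-fsanitize-coverage-type=3"},
      {IndirCall, "-fsanitize-coverage-indirect-calls"}};
  const int Features = Edge | IndirCall;
  for (const auto &F : CoverageFlags)
    if (Features & F.first)
      std::printf("%s\n", F.second); // type=3 and indirect-calls
}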
if (A->getOption().matches(options::OPT_fsanitize_EQ) && 0 == strcmp("all", Value)) Kind = 0; else - Kind = parseValue(Value); + Kind = parseSanitizerValue(Value, /*AllowGroups=*/true); if (Kind) Kinds |= Kind; @@ -486,34 +550,61 @@ unsigned parseArgValues(const Driver &D, const llvm::opt::Arg *A, return Kinds; } +int parseCoverageFeatures(const Driver &D, const llvm::opt::Arg *A) { + assert(A->getOption().matches(options::OPT_fsanitize_coverage) || + A->getOption().matches(options::OPT_fno_sanitize_coverage)); + int Features = 0; + for (int i = 0, n = A->getNumValues(); i != n; ++i) { + const char *Value = A->getValue(i); + int F = llvm::StringSwitch<int>(Value) + .Case("func", CoverageFunc) + .Case("bb", CoverageBB) + .Case("edge", CoverageEdge) + .Case("indirect-calls", CoverageIndirCall) + .Case("trace-bb", CoverageTraceBB) + .Case("trace-cmp", CoverageTraceCmp) + .Case("8bit-counters", Coverage8bitCounters) + .Default(0); + if (F == 0) + D.Diag(clang::diag::err_drv_unsupported_option_argument) + << A->getOption().getName() << Value; + Features |= F; + } + return Features; +} + std::string lastArgumentForMask(const Driver &D, const llvm::opt::ArgList &Args, - unsigned Mask) { + SanitizerMask Mask) { for (llvm::opt::ArgList::const_reverse_iterator I = Args.rbegin(), E = Args.rend(); I != E; ++I) { const auto *Arg = *I; if (Arg->getOption().matches(options::OPT_fsanitize_EQ)) { - unsigned AddKinds = expandGroups(parseArgValues(D, Arg, false)); + SanitizerMask AddKinds = + expandSanitizerGroups(parseArgValues(D, Arg, false)); if (AddKinds & Mask) return describeSanitizeArg(Arg, Mask); } else if (Arg->getOption().matches(options::OPT_fno_sanitize_EQ)) { - unsigned RemoveKinds = expandGroups(parseArgValues(D, Arg, false)); + SanitizerMask RemoveKinds = + expandSanitizerGroups(parseArgValues(D, Arg, false)); Mask &= ~RemoveKinds; } } llvm_unreachable("arg list didn't provide expected value"); } -std::string describeSanitizeArg(const llvm::opt::Arg *A, unsigned Mask) { +std::string describeSanitizeArg(const llvm::opt::Arg *A, SanitizerMask Mask) { assert(A->getOption().matches(options::OPT_fsanitize_EQ) && "Invalid argument in describeSanitizerArg!"); std::string Sanitizers; - for (unsigned I = 0, N = A->getNumValues(); I != N; ++I) { - if (expandGroups(parseValue(A->getValue(I))) & Mask) { + for (int i = 0, n = A->getNumValues(); i != n; ++i) { + if (expandSanitizerGroups( + parseSanitizerValue(A->getValue(i), /*AllowGroups=*/true)) & + Mask) { if (!Sanitizers.empty()) Sanitizers += ","; - Sanitizers += A->getValue(I); + Sanitizers += A->getValue(i); } } diff --git a/contrib/llvm/tools/clang/lib/Driver/ToolChain.cpp b/contrib/llvm/tools/clang/lib/Driver/ToolChain.cpp index 2bcfecf..f7b7402 100644 --- a/contrib/llvm/tools/clang/lib/Driver/ToolChain.cpp +++ b/contrib/llvm/tools/clang/lib/Driver/ToolChain.cpp @@ -26,14 +26,47 @@ using namespace clang::driver; using namespace clang; using namespace llvm::opt; +static llvm::opt::Arg *GetRTTIArgument(const ArgList &Args) { + return Args.getLastArg(options::OPT_mkernel, options::OPT_fapple_kext, + options::OPT_fno_rtti, options::OPT_frtti); +} + +static ToolChain::RTTIMode CalculateRTTIMode(const ArgList &Args, + const llvm::Triple &Triple, + const Arg *CachedRTTIArg) { + // Explicit rtti/no-rtti args + if (CachedRTTIArg) { + if (CachedRTTIArg->getOption().matches(options::OPT_frtti)) + return ToolChain::RM_EnabledExplicitly; + else + return ToolChain::RM_DisabledExplicitly; + } + + // -frtti is default, except for the PS4 CPU. 
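lastArgumentForMask scans the command line from the back so that the newest -fsanitize= argument still covering the mask wins, with any later -fno-sanitize= first stripping kinds out of the mask. A standalone model of that scan, with the option matching flattened into a struct:

#include <cstdint>
#include <cstdio>
#include <vector>

using Mask = uint64_t;
struct SanArg { bool IsNoSanitize; Mask Kinds; const char *Text; };

const char *lastArgumentForMaskSketch(std::vector<SanArg> Args, Mask M) {
  for (auto I = Args.rbegin(); I != Args.rend(); ++I) {
    if (!I->IsNoSanitize && (I->Kinds & M))
      return I->Text; // newest argument that enabled a kind in M
    if (I->IsNoSanitize)
      M &= ~I->Kinds;  // a later -fno-sanitize= shadows earlier enables
  }
  return nullptr;      // llvm_unreachable in the real code
}

int main() {
  const Mask Address = 1, Thread = 2;
  std::vector<SanArg> Args = {{false, Address, "-fsanitize=address"},
                              {true, Address, "-fno-sanitize=address"},
                              {false, Thread, "-fsanitize=thread"}};
  std::printf("%s\n", lastArgumentForMaskSketch(Args, Address | Thread));
  // -fsanitize=thread
}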
+ if (!Triple.isPS4CPU()) + return ToolChain::RM_EnabledImplicitly; + + // On the PS4, turning on c++ exceptions turns on rtti. + // We're assuming that, if we see -fexceptions, rtti gets turned on. + Arg *Exceptions = Args.getLastArgNoClaim( + options::OPT_fcxx_exceptions, options::OPT_fno_cxx_exceptions, + options::OPT_fexceptions, options::OPT_fno_exceptions); + if (Exceptions && + (Exceptions->getOption().matches(options::OPT_fexceptions) || + Exceptions->getOption().matches(options::OPT_fcxx_exceptions))) + return ToolChain::RM_EnabledImplicitly; + + return ToolChain::RM_DisabledImplicitly; +} + ToolChain::ToolChain(const Driver &D, const llvm::Triple &T, const ArgList &Args) - : D(D), Triple(T), Args(Args) { + : D(D), Triple(T), Args(Args), CachedRTTIArg(GetRTTIArgument(Args)), + CachedRTTIMode(CalculateRTTIMode(Args, Triple, CachedRTTIArg)) { if (Arg *A = Args.getLastArg(options::OPT_mthread_model)) if (!isThreadModelSupported(A->getValue())) D.Diag(diag::err_drv_invalid_thread_model_for_target) - << A->getValue() - << A->getAsString(Args); + << A->getValue() << A->getAsString(Args); } ToolChain::~ToolChain() { @@ -264,18 +297,18 @@ std::string ToolChain::ComputeLLVMTriple(const ArgList &Args, // '-mbig-endian'/'-EB'. if (Arg *A = Args.getLastArg(options::OPT_mlittle_endian, options::OPT_mbig_endian)) { - if (A->getOption().matches(options::OPT_mlittle_endian)) - IsBigEndian = false; - else - IsBigEndian = true; + IsBigEndian = !A->getOption().matches(options::OPT_mlittle_endian); } // Thumb2 is the default for V7 on Darwin. // // FIXME: Thumb should just be another -target-feature, not in the triple. - StringRef Suffix = Triple.isOSBinFormatMachO() - ? tools::arm::getLLVMArchSuffixForARM(tools::arm::getARMCPUForMArch(Args, Triple)) - : tools::arm::getLLVMArchSuffixForARM(tools::arm::getARMTargetCPU(Args, Triple)); + StringRef CPU = Triple.isOSBinFormatMachO() + ? tools::arm::getARMCPUForMArch(Args, Triple) + : tools::arm::getARMTargetCPU(Args, Triple); + StringRef Suffix = + tools::arm::getLLVMArchSuffixForARM(CPU, + tools::arm::getARMArch(Args, Triple)); bool ThumbDefault = Suffix.startswith("v6m") || Suffix.startswith("v7m") || Suffix.startswith("v7em") || (Suffix.startswith("v7") && getTriple().isOSBinFormatMachO()); diff --git a/contrib/llvm/tools/clang/lib/Driver/ToolChains.cpp b/contrib/llvm/tools/clang/lib/Driver/ToolChains.cpp index 8bd8298..6d52ab9 100644 --- a/contrib/llvm/tools/clang/lib/Driver/ToolChains.cpp +++ b/contrib/llvm/tools/clang/lib/Driver/ToolChains.cpp @@ -108,6 +108,7 @@ bool Darwin::hasBlocksRuntime() const { } } +// FIXME: Use ARMTargetParser. static const char *GetArmArchForMArch(StringRef Value) { return llvm::StringSwitch<const char*>(Value) .Case("armv6k", "armv6") @@ -125,6 +126,7 @@ static const char *GetArmArchForMArch(StringRef Value) { .Default(nullptr); } +// FIXME: Use ARMTargetParser.
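CalculateRTTIMode above boils down to a small decision function: explicit -frtti/-fno-rtti (or -mkernel/-fapple-kext, which imply no-rtti) always win; otherwise RTTI defaults on everywhere except the PS4, where it follows the exception setting. A pure-function model with the argument lookups flattened into booleans:

#include <cstdio>

enum class RTTIMode {
  EnabledExplicitly, DisabledExplicitly, EnabledImplicitly, DisabledImplicitly
};

RTTIMode calcRTTI(bool ExplicitRtti, bool ExplicitNoRtti, bool IsPS4,
                  bool ExceptionsOn) {
  if (ExplicitRtti)
    return RTTIMode::EnabledExplicitly;
  if (ExplicitNoRtti) // also covers -mkernel / -fapple-kext
    return RTTIMode::DisabledExplicitly;
  if (!IsPS4)
    return RTTIMode::EnabledImplicitly; // -frtti is the default
  // PS4: -fexceptions/-fcxx-exceptions pull RTTI in implicitly.
  return ExceptionsOn ? RTTIMode::EnabledImplicitly
                      : RTTIMode::DisabledImplicitly;
}

int main() {
  std::printf("%d\n", (int)calcRTTI(false, false, true, true));  // 2
  std::printf("%d\n", (int)calcRTTI(false, false, true, false)); // 3
}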
static const char *GetArmArchForMCpu(StringRef Value) { return llvm::StringSwitch<const char *>(Value) .Cases("arm9e", "arm946e-s", "arm966e-s", "arm968e-s", "arm926ej-s","armv5") @@ -132,11 +134,11 @@ static const char *GetArmArchForMCpu(StringRef Value) { .Cases("arm1020t", "arm1020e", "arm1022e", "arm1026ej-s", "armv5") .Case("xscale", "xscale") .Cases("arm1136j-s", "arm1136jf-s", "arm1176jz-s", "arm1176jzf-s", "armv6") - .Case("cortex-m0", "armv6m") + .Cases("sc000", "cortex-m0", "cortex-m0plus", "cortex-m1", "armv6m") .Cases("cortex-a5", "cortex-a7", "cortex-a8", "armv7") .Cases("cortex-a9", "cortex-a12", "cortex-a15", "cortex-a17", "krait", "armv7") - .Cases("cortex-r4", "cortex-r5", "armv7r") - .Case("cortex-m3", "armv7m") + .Cases("cortex-r4", "cortex-r4f", "cortex-r5", "cortex-r7", "armv7r") + .Cases("sc300", "cortex-m3", "armv7m") .Cases("cortex-m4", "cortex-m7", "armv7em") .Case("swift", "armv7s") .Default(nullptr); @@ -302,8 +304,8 @@ void MachO::AddLinkRuntimeLib(const ArgList &Args, ArgStringList &CmdArgs, // For now, allow missing resource libraries to support developers who may // not have compiler-rt checked out or integrated into their build (unless // we explicitly force linking with this library). - if (AlwaysLink || llvm::sys::fs::exists(P.str())) - CmdArgs.push_back(Args.MakeArgString(P.str())); + if (AlwaysLink || llvm::sys::fs::exists(P)) + CmdArgs.push_back(Args.MakeArgString(P)); // Adding the rpaths might negatively interact when other rpaths are involved, // so we should make sure we add the rpaths last, after all user-specified @@ -320,7 +322,49 @@ void MachO::AddLinkRuntimeLib(const ArgList &Args, ArgStringList &CmdArgs, // Add the path to the resource dir to rpath to support using the dylib // from the default location without copying. CmdArgs.push_back("-rpath"); - CmdArgs.push_back(Args.MakeArgString(Dir.str())); + CmdArgs.push_back(Args.MakeArgString(Dir)); + } +} + +void Darwin::addProfileRTLibs(const ArgList &Args, + ArgStringList &CmdArgs) const { + if (!(Args.hasFlag(options::OPT_fprofile_arcs, options::OPT_fno_profile_arcs, + false) || + Args.hasArg(options::OPT_fprofile_generate) || + Args.hasArg(options::OPT_fprofile_instr_generate) || + Args.hasArg(options::OPT_fprofile_instr_generate_EQ) || + Args.hasArg(options::OPT_fcreate_profile) || + Args.hasArg(options::OPT_coverage))) + return; + + // Select the appropriate runtime library for the target. + if (isTargetIOSBased()) + AddLinkRuntimeLib(Args, CmdArgs, "libclang_rt.profile_ios.a", + /*AlwaysLink*/ true); + else + AddLinkRuntimeLib(Args, CmdArgs, "libclang_rt.profile_osx.a", + /*AlwaysLink*/ true); +} + +void DarwinClang::AddLinkSanitizerLibArgs(const ArgList &Args, + ArgStringList &CmdArgs, + StringRef Sanitizer) const { + if (!Args.hasArg(options::OPT_dynamiclib) && + !Args.hasArg(options::OPT_bundle)) { + // Sanitizer runtime libraries require C++. + AddCXXStdlibLibArgs(Args, CmdArgs); + } + assert(isTargetMacOS() || isTargetIOSSimulator()); + StringRef OS = isTargetMacOS() ? "osx" : "iossim"; + AddLinkRuntimeLib(Args, CmdArgs, (Twine("libclang_rt.") + Sanitizer + "_" + + OS + "_dynamic.dylib").str(), + /*AlwaysLink*/ true, /*IsEmbedded*/ false, + /*AddRPath*/ true); + + if (GetCXXStdlibType(Args) == ToolChain::CST_Libcxx) { + // Add explicit dependency on -lc++abi, as -lc++ doesn't re-export + // all RTTI-related symbols that UBSan uses.
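The new AddLinkSanitizerLibArgs centralizes the runtime naming that was previously spelled out per sanitizer: the dylib it links is always libclang_rt.<name>_<osx|iossim>_dynamic.dylib. The string construction in isolation:

#include <iostream>
#include <string>

std::string darwinSanitizerDylib(const std::string &Sanitizer, bool IsMacOS) {
  // The iOS simulator is the only non-macOS target that reaches this code,
  // per the assert in AddLinkSanitizerLibArgs.
  const std::string OS = IsMacOS ? "osx" : "iossim";
  return "libclang_rt." + Sanitizer + "_" + OS + "_dynamic.dylib";
}

int main() {
  std::cout << darwinSanitizerDylib("asan", true) << "\n";
  // libclang_rt.asan_osx_dynamic.dylib
  std::cout << darwinSanitizerDylib("ubsan", false) << "\n";
  // libclang_rt.ubsan_iossim_dynamic.dylib
}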
+ CmdArgs.push_back("-lc++abi"); } } @@ -352,63 +396,26 @@ void DarwinClang::AddLinkRuntimeLibArgs(const ArgList &Args, return; } - // If we are building profile support, link that library in. - if (Args.hasFlag(options::OPT_fprofile_arcs, options::OPT_fno_profile_arcs, - false) || - Args.hasArg(options::OPT_fprofile_generate) || - Args.hasArg(options::OPT_fprofile_instr_generate) || - Args.hasArg(options::OPT_fcreate_profile) || - Args.hasArg(options::OPT_coverage)) { - // Select the appropriate runtime library for the target. - if (isTargetIOSBased()) - AddLinkRuntimeLib(Args, CmdArgs, "libclang_rt.profile_ios.a"); - else - AddLinkRuntimeLib(Args, CmdArgs, "libclang_rt.profile_osx.a"); - } const SanitizerArgs &Sanitize = getSanitizerArgs(); - // Add Ubsan runtime library, if required. - if (Sanitize.needsUbsanRt()) { - // FIXME: Move this check to SanitizerArgs::filterUnsupportedKinds. - if (isTargetIOSBased()) { + if (Sanitize.needsAsanRt()) { + if (!isTargetMacOS() && !isTargetIOSSimulator()) { + // FIXME: Move this check to SanitizerArgs::filterUnsupportedKinds. getDriver().Diag(diag::err_drv_clang_unsupported_per_platform) - << "-fsanitize=undefined"; + << "-fsanitize=address"; } else { - assert(isTargetMacOS() && "unexpected non OS X target"); - AddLinkRuntimeLib(Args, CmdArgs, "libclang_rt.ubsan_osx.a", true); - - // The Ubsan runtime library requires C++. - AddCXXStdlibLibArgs(Args, CmdArgs); + AddLinkSanitizerLibArgs(Args, CmdArgs, "asan"); } } - // Add ASAN runtime library, if required. Dynamic libraries and bundles - // should not be linked with the runtime library. - if (Sanitize.needsAsanRt()) { - // FIXME: Move this check to SanitizerArgs::filterUnsupportedKinds. - if (isTargetIPhoneOS()) { + if (Sanitize.needsUbsanRt()) { + if (!isTargetMacOS() && !isTargetIOSSimulator()) { + // FIXME: Move this check to SanitizerArgs::filterUnsupportedKinds. getDriver().Diag(diag::err_drv_clang_unsupported_per_platform) - << "-fsanitize=address"; + << "-fsanitize=undefined"; } else { - if (!Args.hasArg(options::OPT_dynamiclib) && - !Args.hasArg(options::OPT_bundle)) { - // The ASAN runtime library requires C++. - AddCXXStdlibLibArgs(Args, CmdArgs); - } - if (isTargetMacOS()) { - AddLinkRuntimeLib(Args, CmdArgs, - "libclang_rt.asan_osx_dynamic.dylib", - /*AlwaysLink*/ true, /*IsEmbedded*/ false, - /*AddRPath*/ true); - } else { - if (isTargetIOSSimulator()) { - AddLinkRuntimeLib(Args, CmdArgs, - "libclang_rt.asan_iossim_dynamic.dylib", - /*AlwaysLink*/ true, /*IsEmbedded*/ false, - /*AddRPath*/ true); - } - } + AddLinkSanitizerLibArgs(Args, CmdArgs, "ubsan"); } } @@ -599,11 +606,11 @@ void DarwinClang::AddCXXStdlibLibArgs(const ArgList &Args, SmallString<128> P(A->getValue()); llvm::sys::path::append(P, "usr", "lib", "libstdc++.dylib"); - if (!llvm::sys::fs::exists(P.str())) { + if (!llvm::sys::fs::exists(P)) { llvm::sys::path::remove_filename(P); llvm::sys::path::append(P, "libstdc++.6.dylib"); - if (llvm::sys::fs::exists(P.str())) { - CmdArgs.push_back(Args.MakeArgString(P.str())); + if (llvm::sys::fs::exists(P)) { + CmdArgs.push_back(Args.MakeArgString(P)); return; } } @@ -646,8 +653,8 @@ void DarwinClang::AddCCKextLibArgs(const ArgList &Args, // For now, allow missing resource libraries to support developers who may // not have compiler-rt checked out or integrated into their build. 
- if (llvm::sys::fs::exists(P.str())) - CmdArgs.push_back(Args.MakeArgString(P.str())); + if (llvm::sys::fs::exists(P)) + CmdArgs.push_back(Args.MakeArgString(P)); } DerivedArgList *MachO::TranslateArgs(const DerivedArgList &Args, @@ -1498,11 +1505,12 @@ bool Generic_GCC::GCCInstallationDetector::getBiarchSibling(Multilib &M) const { namespace { // Filter to remove Multilibs that don't exist as a suffix to Path -class FilterNonExistent : public MultilibSet::FilterCallback { - std::string Base; +class FilterNonExistent { + StringRef Base; + public: - FilterNonExistent(std::string Base) : Base(Base) {} - bool operator()(const Multilib &M) const override { + FilterNonExistent(StringRef Base) : Base(Base) {} + bool operator()(const Multilib &M) { return !llvm::sys::fs::exists(Base + M.gccSuffix() + "/crtbegin.o"); } }; @@ -1783,10 +1791,13 @@ static bool findMIPSMultilibs(const llvm::Triple &TargetTriple, StringRef Path, addMultilibFlag(isMips64(TargetArch), "m64", Flags); addMultilibFlag(isMips16(Args), "mips16", Flags); addMultilibFlag(CPUName == "mips32", "march=mips32", Flags); - addMultilibFlag(CPUName == "mips32r2", "march=mips32r2", Flags); + addMultilibFlag(CPUName == "mips32r2" || CPUName == "mips32r3" || + CPUName == "mips32r5", + "march=mips32r2", Flags); addMultilibFlag(CPUName == "mips32r6", "march=mips32r6", Flags); addMultilibFlag(CPUName == "mips64", "march=mips64", Flags); - addMultilibFlag(CPUName == "mips64r2" || CPUName == "octeon", + addMultilibFlag(CPUName == "mips64r2" || CPUName == "mips64r3" || + CPUName == "mips64r5" || CPUName == "octeon", "march=mips64r2", Flags); addMultilibFlag(isMicroMips(Args), "mmicromips", Flags); addMultilibFlag(tools::mips::isUCLibc(Args), "muclibc", Flags); @@ -2073,6 +2084,7 @@ bool Generic_GCC::IsIntegratedAssemblerDefault() const { getTriple().getArch() == llvm::Triple::ppc64 || getTriple().getArch() == llvm::Triple::ppc64le || getTriple().getArch() == llvm::Triple::sparc || + getTriple().getArch() == llvm::Triple::sparcel || getTriple().getArch() == llvm::Triple::sparcv9 || getTriple().getArch() == llvm::Triple::systemz; } @@ -2085,7 +2097,8 @@ void Generic_ELF::addClangTargetOptions(const ArgList &DriverArgs, getTriple().getArch() == llvm::Triple::aarch64_be || (getTriple().getOS() == llvm::Triple::Linux && (!V.isOlderThan(4, 7, 0) || - getTriple().getEnvironment() == llvm::Triple::Android)); + getTriple().getEnvironment() == llvm::Triple::Android)) || + getTriple().getOS() == llvm::Triple::NaCl; if (DriverArgs.hasFlag(options::OPT_fuse_init_array, options::OPT_fno_use_init_array, @@ -2115,6 +2128,30 @@ std::string Hexagon_TC::GetGnuDir(const std::string &InstalledDir, return InstallRelDir; } +const char *Hexagon_TC::GetSmallDataThreshold(const ArgList &Args) +{ + Arg *A; + + A = Args.getLastArg(options::OPT_G, + options::OPT_G_EQ, + options::OPT_msmall_data_threshold_EQ); + if (A) + return A->getValue(); + + A = Args.getLastArg(options::OPT_shared, + options::OPT_fpic, + options::OPT_fPIC); + if (A) + return "0"; + + return 0; +} + +bool Hexagon_TC::UsesG0(const char* smallDataThreshold) +{ + return smallDataThreshold && smallDataThreshold[0] == '0'; +} + static void GetHexagonLibraryPaths( const ArgList &Args, const std::string &Ver, @@ -2246,7 +2283,7 @@ void Hexagon_TC::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs, llvm::sys::path::append(IncludeDir, "hexagon/include/c++/"); llvm::sys::path::append(IncludeDir, Ver); - addSystemInclude(DriverArgs, CC1Args, IncludeDir.str()); + addSystemInclude(DriverArgs, CC1Args, 
IncludeDir); } ToolChain::CXXStdlibType @@ -2308,6 +2345,159 @@ StringRef Hexagon_TC::GetTargetCPU(const ArgList &Args) } // End Hexagon +/// NaCl Toolchain +NaCl_TC::NaCl_TC(const Driver &D, const llvm::Triple &Triple, + const ArgList &Args) + : Generic_ELF(D, Triple, Args) { + + // Remove paths added by Generic_GCC. NaCl Toolchain cannot use the + // default paths, and must instead only use the paths provided + // with this toolchain based on architecture. + path_list& file_paths = getFilePaths(); + path_list& prog_paths = getProgramPaths(); + + file_paths.clear(); + prog_paths.clear(); + + // Path for library files (libc.a, ...) + std::string FilePath(getDriver().Dir + "/../"); + + // Path for tools (clang, ld, etc..) + std::string ProgPath(getDriver().Dir + "/../"); + + // Path for toolchain libraries (libgcc.a, ...) + std::string ToolPath(getDriver().ResourceDir + "/lib/"); + + switch(Triple.getArch()) { + case llvm::Triple::x86: { + file_paths.push_back(FilePath + "x86_64-nacl/lib32"); + file_paths.push_back(FilePath + "x86_64-nacl/usr/lib32"); + prog_paths.push_back(ProgPath + "x86_64-nacl/bin"); + file_paths.push_back(ToolPath + "i686-nacl"); + break; + } + case llvm::Triple::x86_64: { + file_paths.push_back(FilePath + "x86_64-nacl/lib"); + file_paths.push_back(FilePath + "x86_64-nacl/usr/lib"); + prog_paths.push_back(ProgPath + "x86_64-nacl/bin"); + file_paths.push_back(ToolPath + "x86_64-nacl"); + break; + } + case llvm::Triple::arm: { + file_paths.push_back(FilePath + "arm-nacl/lib"); + file_paths.push_back(FilePath + "arm-nacl/usr/lib"); + prog_paths.push_back(ProgPath + "arm-nacl/bin"); + file_paths.push_back(ToolPath + "arm-nacl"); + break; + } + default: + break; + } + + // Use provided linker, not system linker + Linker = GetProgramPath("ld"); + NaClArmMacrosPath = GetFilePath("nacl-arm-macros.s"); +} + +void NaCl_TC::AddClangSystemIncludeArgs(const ArgList &DriverArgs, + ArgStringList &CC1Args) const { + const Driver &D = getDriver(); + if (DriverArgs.hasArg(options::OPT_nostdinc)) + return; + + if (!DriverArgs.hasArg(options::OPT_nobuiltininc)) { + SmallString<128> P(D.ResourceDir); + llvm::sys::path::append(P, "include"); + addSystemInclude(DriverArgs, CC1Args, P.str()); + } + + if (DriverArgs.hasArg(options::OPT_nostdlibinc)) + return; + + SmallString<128> P(D.Dir + "/../"); + if (getTriple().getArch() == llvm::Triple::arm) { + llvm::sys::path::append(P, "arm-nacl/usr/include"); + } else if (getTriple().getArch() == llvm::Triple::x86) { + llvm::sys::path::append(P, "x86_64-nacl/usr/include"); + } else if (getTriple().getArch() == llvm::Triple::x86_64) { + llvm::sys::path::append(P, "x86_64-nacl/usr/include"); + } else { + return; + } + + addSystemInclude(DriverArgs, CC1Args, P.str()); + llvm::sys::path::remove_filename(P); + llvm::sys::path::remove_filename(P); + llvm::sys::path::append(P, "include"); + addSystemInclude(DriverArgs, CC1Args, P.str()); +} + +void NaCl_TC::AddCXXStdlibLibArgs(const ArgList &Args, + ArgStringList &CmdArgs) const { + // Check for -stdlib= flags. We only support libc++ but this consumes the arg + // if the value is libc++, and emits an error for other values. + GetCXXStdlibType(Args); + CmdArgs.push_back("-lc++"); +} + +void NaCl_TC::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs, + ArgStringList &CC1Args) const { + const Driver &D = getDriver(); + if (DriverArgs.hasArg(options::OPT_nostdlibinc) || + DriverArgs.hasArg(options::OPT_nostdincxx)) + return; + + // Check for -stdlib= flags. 
We only support libc++ but this consumes the arg + // if the value is libc++, and emits an error for other values. + GetCXXStdlibType(DriverArgs); + + if (getTriple().getArch() == llvm::Triple::arm) { + SmallString<128> P(D.Dir + "/../"); + llvm::sys::path::append(P, "arm-nacl/include/c++/v1"); + addSystemInclude(DriverArgs, CC1Args, P.str()); + } else if (getTriple().getArch() == llvm::Triple::x86) { + SmallString<128> P(D.Dir + "/../"); + llvm::sys::path::append(P, "x86_64-nacl/include/c++/v1"); + addSystemInclude(DriverArgs, CC1Args, P.str()); + } else if (getTriple().getArch() == llvm::Triple::x86_64) { + SmallString<128> P(D.Dir + "/../"); + llvm::sys::path::append(P, "x86_64-nacl/include/c++/v1"); + addSystemInclude(DriverArgs, CC1Args, P.str()); + } +} + +ToolChain::CXXStdlibType NaCl_TC::GetCXXStdlibType(const ArgList &Args) const { + if (Arg *A = Args.getLastArg(options::OPT_stdlib_EQ)) { + StringRef Value = A->getValue(); + if (Value == "libc++") + return ToolChain::CST_Libcxx; + getDriver().Diag(diag::err_drv_invalid_stdlib_name) + << A->getAsString(Args); + } + + return ToolChain::CST_Libcxx; +} + +std::string NaCl_TC::ComputeEffectiveClangTriple( + const ArgList &Args, types::ID InputType) const { + llvm::Triple TheTriple(ComputeLLVMTriple(Args, InputType)); + if (TheTriple.getArch() == llvm::Triple::arm && + TheTriple.getEnvironment() == llvm::Triple::UnknownEnvironment) + TheTriple.setEnvironment(llvm::Triple::GNUEABIHF); + return TheTriple.getTriple(); +} + +Tool *NaCl_TC::buildLinker() const { + return new tools::nacltools::Link(*this); +} + +Tool *NaCl_TC::buildAssembler() const { + if (getTriple().getArch() == llvm::Triple::arm) + return new tools::nacltools::AssembleARM(*this); + return new tools::gnutools::Assemble(*this); +} +// End NaCl + /// TCEToolChain - A tool chain using the llvm bitcode tools to perform /// all subcommands. See http://tce.cs.tut.fi for our peculiar target. /// Currently does not support anything else but compilation. @@ -2341,6 +2531,36 @@ bool TCEToolChain::isPICDefaultForced() const { return false; } +// CloudABI - CloudABI tool chain which can call ld(1) directly. + +CloudABI::CloudABI(const Driver &D, const llvm::Triple &Triple, + const ArgList &Args) + : Generic_ELF(D, Triple, Args) { + SmallString<128> P(getDriver().Dir); + llvm::sys::path::append(P, "..", getTriple().str(), "lib"); + getFilePaths().push_back(P.str()); +} + +void CloudABI::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs, + ArgStringList &CC1Args) const { + if (DriverArgs.hasArg(options::OPT_nostdlibinc) && + DriverArgs.hasArg(options::OPT_nostdincxx)) + return; + + SmallString<128> P(getDriver().Dir); + llvm::sys::path::append(P, "..", getTriple().str(), "include/c++/v1"); + addSystemInclude(DriverArgs, CC1Args, P.str()); +} + +void CloudABI::AddCXXStdlibLibArgs(const ArgList &Args, + ArgStringList &CmdArgs) const { + CmdArgs.push_back("-lc++"); + CmdArgs.push_back("-lc++abi"); + CmdArgs.push_back("-lunwind"); +} + +Tool *CloudABI::buildLinker() const { return new tools::cloudabi::Link(*this); } + /// OpenBSD - OpenBSD tool chain which can call as(1) and ld(1) directly. 
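NaCl_TC::ComputeEffectiveClangTriple has one wrinkle worth calling out: an ARM triple with no explicit environment is completed to GNUEABIHF, i.e. hard-float is the NaCl ARM default. A simplified string-level model (the real code mutates an llvm::Triple rather than concatenating strings):

#include <iostream>
#include <string>

std::string naclEffectiveTriple(std::string Triple, bool IsArm,
                                bool HasEnvironment) {
  if (IsArm && !HasEnvironment)
    Triple += "-gnueabihf"; // setEnvironment(llvm::Triple::GNUEABIHF)
  return Triple;
}

int main() {
  std::cout << naclEffectiveTriple("armv7-unknown-nacl", true, false)
            << "\n"; // armv7-unknown-nacl-gnueabihf
}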
OpenBSD::OpenBSD(const Driver &D, const llvm::Triple& Triple, const ArgList &Args) @@ -2677,10 +2897,12 @@ enum Distro { DebianSqueeze, DebianWheezy, DebianJessie, + DebianStretch, Exherbo, RHEL4, RHEL5, RHEL6, + RHEL7, Fedora, OpenSUSE, UbuntuHardy, @@ -2696,11 +2918,13 @@ enum Distro { UbuntuRaring, UbuntuSaucy, UbuntuTrusty, + UbuntuUtopic, + UbuntuVivid, UnknownDistro }; static bool IsRedhat(enum Distro Distro) { - return Distro == Fedora || (Distro >= RHEL4 && Distro <= RHEL6); + return Distro == Fedora || (Distro >= RHEL4 && Distro <= RHEL7); } static bool IsOpenSUSE(enum Distro Distro) { @@ -2708,11 +2932,11 @@ static bool IsOpenSUSE(enum Distro Distro) { } static bool IsDebian(enum Distro Distro) { - return Distro >= DebianLenny && Distro <= DebianJessie; + return Distro >= DebianLenny && Distro <= DebianStretch; } static bool IsUbuntu(enum Distro Distro) { - return Distro >= UbuntuHardy && Distro <= UbuntuTrusty; + return Distro >= UbuntuHardy && Distro <= UbuntuVivid; } static Distro DetectDistro(llvm::Triple::ArchType Arch) { @@ -2739,6 +2963,8 @@ static Distro DetectDistro(llvm::Triple::ArchType Arch) { .Case("raring", UbuntuRaring) .Case("saucy", UbuntuSaucy) .Case("trusty", UbuntuTrusty) + .Case("utopic", UbuntuUtopic) + .Case("vivid", UbuntuVivid) .Default(UnknownDistro); return Version; } @@ -2750,7 +2976,9 @@ static Distro DetectDistro(llvm::Triple::ArchType Arch) { return Fedora; if (Data.startswith("Red Hat Enterprise Linux") || Data.startswith("CentOS")) { - if (Data.find("release 6") != StringRef::npos) + if (Data.find("release 7") != StringRef::npos) + return RHEL7; + else if (Data.find("release 6") != StringRef::npos) return RHEL6; else if (Data.find("release 5") != StringRef::npos) return RHEL5; @@ -2771,6 +2999,8 @@ static Distro DetectDistro(llvm::Triple::ArchType Arch) { return DebianWheezy; else if (Data.startswith("jessie/sid") || Data[0] == '8') return DebianJessie; + else if (Data.startswith("stretch/sid") || Data[0] == '9') + return DebianStretch; return UnknownDistro; } @@ -2967,8 +3197,7 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args) if (IsRedhat(Distro)) ExtraOpts.push_back("--no-add-needed"); - if (Distro == DebianSqueeze || Distro == DebianWheezy || - Distro == DebianJessie || IsOpenSUSE(Distro) || + if ((IsDebian(Distro) && Distro >= DebianSqueeze) || IsOpenSUSE(Distro) || (IsRedhat(Distro) && Distro != RHEL4 && Distro != RHEL5) || (IsUbuntu(Distro) && Distro >= UbuntuKarmic)) ExtraOpts.push_back("--build-id"); @@ -3144,7 +3373,7 @@ void Linux::AddClangSystemIncludeArgs(const ArgList &DriverArgs, if (!DriverArgs.hasArg(options::OPT_nobuiltininc)) { SmallString<128> P(D.ResourceDir); llvm::sys::path::append(P, "include"); - addSystemInclude(DriverArgs, CC1Args, P.str()); + addSystemInclude(DriverArgs, CC1Args, P); } if (DriverArgs.hasArg(options::OPT_nostdlibinc)) @@ -3168,7 +3397,7 @@ void Linux::AddClangSystemIncludeArgs(const ArgList &DriverArgs, // Add include directories specific to the selected multilib set and multilib. 
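The Distro predicates above rely purely on enum ordering, which is why adding Utopic/Vivid, Stretch, and RHEL7 only required bumping the end of each range check. A trimmed-down illustration; the real enum has many more entries:

#include <cstdio>

enum Distro {
  DebianLenny, DebianSqueeze, DebianWheezy, DebianJessie, DebianStretch,
  UbuntuHardy, UbuntuTrusty, UbuntuUtopic, UbuntuVivid, UnknownDistro
};

bool isDebian(Distro D) { return D >= DebianLenny && D <= DebianStretch; }
bool isUbuntu(Distro D) { return D >= UbuntuHardy && D <= UbuntuVivid; }

int main() {
  std::printf("%d %d\n", isDebian(DebianStretch), isUbuntu(UbuntuVivid)); // 1 1
}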
if (GCCInstallation.isValid()) { - auto Callback = Multilibs.includeDirsCallback(); + const auto &Callback = Multilibs.includeDirsCallback(); if (Callback) { const auto IncludePaths = Callback(GCCInstallation.getInstallPath(), GCCInstallation.getTriple().str(), diff --git a/contrib/llvm/tools/clang/lib/Driver/ToolChains.h b/contrib/llvm/tools/clang/lib/Driver/ToolChains.h index 47fb10d..8906e21 100644 --- a/contrib/llvm/tools/clang/lib/Driver/ToolChains.h +++ b/contrib/llvm/tools/clang/lib/Driver/ToolChains.h @@ -152,7 +152,7 @@ protected: public: Generic_GCC(const Driver &D, const llvm::Triple &Triple, const llvm::opt::ArgList &Args); - ~Generic_GCC(); + ~Generic_GCC() override; void printVerboseInfo(raw_ostream &OS) const override; @@ -196,7 +196,7 @@ private: public: MachO(const Driver &D, const llvm::Triple &Triple, const llvm::opt::ArgList &Args); - ~MachO(); + ~MachO() override; /// @name MachO specific toolchain API /// { @@ -239,6 +239,13 @@ public: bool IsEmbedded = false, bool AddRPath = false) const; + /// Add any profiling runtime libraries that are needed. This is essentially a + /// MachO specific version of addProfileRT in Tools.cpp. + virtual void addProfileRTLibs(const llvm::opt::ArgList &Args, + llvm::opt::ArgStringList &CmdArgs) const { + // There aren't any profiling libs for embedded targets currently. + } + /// } /// @name ToolChain Implementation /// { @@ -345,7 +352,7 @@ private: public: Darwin(const Driver &D, const llvm::Triple &Triple, const llvm::opt::ArgList &Args); - ~Darwin(); + ~Darwin() override; std::string ComputeEffectiveClangTriple(const llvm::opt::ArgList &Args, types::ID InputType) const override; @@ -362,10 +369,12 @@ public: llvm::opt::ArgStringList &CmdArgs) const override; bool isKernelStatic() const override { - return !isTargetIPhoneOS() || isIPhoneOSVersionLT(6, 0) || - getTriple().getArch() == llvm::Triple::aarch64; + return !isTargetIPhoneOS() || isIPhoneOSVersionLT(6, 0); } + void addProfileRTLibs(const llvm::opt::ArgList &Args, + llvm::opt::ArgStringList &CmdArgs) const override; + protected: /// } /// @name Darwin specific Toolchain functions @@ -488,13 +497,17 @@ public: AddCCKextLibArgs(const llvm::opt::ArgList &Args, llvm::opt::ArgStringList &CmdArgs) const override; - virtual void addClangWarningOptions(llvm::opt::ArgStringList &CC1Args) - const override; + void addClangWarningOptions(llvm::opt::ArgStringList &CC1Args) const override; void AddLinkARCArgs(const llvm::opt::ArgList &Args, llvm::opt::ArgStringList &CmdArgs) const override; /// } + +private: + void AddLinkSanitizerLibArgs(const llvm::opt::ArgList &Args, + llvm::opt::ArgStringList &CmdArgs, + StringRef Sanitizer) const; }; class LLVM_LIBRARY_VISIBILITY Generic_ELF : public Generic_GCC { @@ -508,6 +521,31 @@ public: llvm::opt::ArgStringList &CC1Args) const override; }; +class LLVM_LIBRARY_VISIBILITY CloudABI : public Generic_ELF { +public: + CloudABI(const Driver &D, const llvm::Triple &Triple, + const llvm::opt::ArgList &Args); + bool HasNativeLLVMSupport() const override { return true; } + + bool IsMathErrnoDefault() const override { return false; } + bool IsObjCNonFragileABIDefault() const override { return true; } + + CXXStdlibType GetCXXStdlibType(const llvm::opt::ArgList &Args) + const override { + return ToolChain::CST_Libcxx; + } + void AddClangCXXStdlibIncludeArgs( + const llvm::opt::ArgList &DriverArgs, + llvm::opt::ArgStringList &CC1Args) const override; + void AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args, + llvm::opt::ArgStringList &CmdArgs) const override; 
+ + bool isPIEDefault() const override { return false; } + +protected: + Tool *buildLinker() const override; +}; + class LLVM_LIBRARY_VISIBILITY Solaris : public Generic_GCC { public: Solaris(const Driver &D, const llvm::Triple &Triple, @@ -670,7 +708,7 @@ protected: public: Hexagon_TC(const Driver &D, const llvm::Triple &Triple, const llvm::opt::ArgList &Args); - ~Hexagon_TC(); + ~Hexagon_TC() override; void AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs, @@ -686,6 +724,49 @@ public: const llvm::opt::ArgList &Args); static StringRef GetTargetCPU(const llvm::opt::ArgList &Args); + + static const char *GetSmallDataThreshold(const llvm::opt::ArgList &Args); + + static bool UsesG0(const char* smallDataThreshold); +}; + +class LLVM_LIBRARY_VISIBILITY NaCl_TC : public Generic_ELF { +public: + NaCl_TC(const Driver &D, const llvm::Triple &Triple, + const llvm::opt::ArgList &Args); + + void + AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs, + llvm::opt::ArgStringList &CC1Args) const override; + void + AddClangCXXStdlibIncludeArgs(const llvm::opt::ArgList &DriverArgs, + llvm::opt::ArgStringList &CC1Args) const override; + + CXXStdlibType + GetCXXStdlibType(const llvm::opt::ArgList &Args) const override; + + void + AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args, + llvm::opt::ArgStringList &CmdArgs) const override; + + bool + IsIntegratedAssemblerDefault() const override { return false; } + + // Get the path to the file containing NaCl's ARM macros. It lives in NaCl_TC + // because the AssembleARM tool needs a const char * that it can pass around + // and the toolchain outlives all the jobs. + const char *GetNaClArmMacrosPath() const { return NaClArmMacrosPath.c_str(); } + + std::string ComputeEffectiveClangTriple(const llvm::opt::ArgList &Args, + types::ID InputType) const override; + std::string Linker; + +protected: + Tool *buildLinker() const override; + Tool *buildAssembler() const override; + +private: + std::string NaClArmMacrosPath; }; /// TCEToolChain - A tool chain using the llvm bitcode tools to perform @@ -694,7 +775,7 @@ class LLVM_LIBRARY_VISIBILITY TCEToolChain : public ToolChain { public: TCEToolChain(const Driver &D, const llvm::Triple &Triple, const llvm::opt::ArgList &Args); - ~TCEToolChain(); + ~TCEToolChain() override; bool IsMathErrnoDefault() const override; bool isPICDefault() const override; diff --git a/contrib/llvm/tools/clang/lib/Driver/Tools.cpp b/contrib/llvm/tools/clang/lib/Driver/Tools.cpp index 5161ddf..72a242c 100644 --- a/contrib/llvm/tools/clang/lib/Driver/Tools.cpp +++ b/contrib/llvm/tools/clang/lib/Driver/Tools.cpp @@ -10,6 +10,7 @@ #include "Tools.h" #include "InputInfo.h" #include "ToolChains.h" +#include "clang/Basic/CharInfo.h" #include "clang/Basic/LangOptions.h" #include "clang/Basic/ObjCRuntime.h" #include "clang/Basic/Version.h" @@ -23,6 +24,7 @@ #include "clang/Driver/SanitizerArgs.h" #include "clang/Driver/ToolChain.h" #include "clang/Driver/Util.h" +#include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallString.h" #include "llvm/ADT/StringExtras.h" #include "llvm/ADT/StringSwitch.h" @@ -30,16 +32,20 @@ #include "llvm/Option/Arg.h" #include "llvm/Option/ArgList.h" #include "llvm/Option/Option.h" +#include "llvm/Support/TargetParser.h" #include "llvm/Support/Compression.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/FileSystem.h" -#include "llvm/Support/Format.h" #include "llvm/Support/Host.h" #include "llvm/Support/Path.h" #include "llvm/Support/Process.h" #include "llvm/Support/Program.h" #include 
"llvm/Support/raw_ostream.h" +#ifdef LLVM_ON_UNIX +#include <unistd.h> // For getuid(). +#endif + using namespace clang::driver; using namespace clang::driver::tools; using namespace clang; @@ -319,8 +325,9 @@ void Clang::AddPreprocessingOptions(Compilation &C, if (A->getOption().matches(options::OPT_M) || A->getOption().matches(options::OPT_MD)) CmdArgs.push_back("-sys-header-deps"); - - if (isa<PrecompileJobAction>(JA)) + if ((isa<PrecompileJobAction>(JA) && + !Args.hasArg(options::OPT_fno_module_file_deps)) || + Args.hasArg(options::OPT_fmodule_file_deps)) CmdArgs.push_back("-module-file-deps"); } @@ -332,6 +339,7 @@ void Clang::AddPreprocessingOptions(Compilation &C, } Args.AddLastArg(CmdArgs, options::OPT_MP); + Args.AddLastArg(CmdArgs, options::OPT_MV); // Convert all -MQ <target> args to -MT <quoted target> for (arg_iterator it = Args.filtered_begin(options::OPT_MT, @@ -377,19 +385,19 @@ void Clang::AddPreprocessingOptions(Compilation &C, P += ".dummy"; if (UsePCH) { llvm::sys::path::replace_extension(P, "pch"); - if (llvm::sys::fs::exists(P.str())) + if (llvm::sys::fs::exists(P)) FoundPCH = true; } if (!FoundPCH) { llvm::sys::path::replace_extension(P, "pth"); - if (llvm::sys::fs::exists(P.str())) + if (llvm::sys::fs::exists(P)) FoundPTH = true; } if (!FoundPCH && !FoundPTH) { llvm::sys::path::replace_extension(P, "gch"); - if (llvm::sys::fs::exists(P.str())) { + if (llvm::sys::fs::exists(P)) { FoundPCH = UsePCH; FoundPTH = !UsePCH; } @@ -402,12 +410,12 @@ void Clang::AddPreprocessingOptions(Compilation &C, CmdArgs.push_back("-include-pch"); else CmdArgs.push_back("-include-pth"); - CmdArgs.push_back(Args.MakeArgString(P.str())); + CmdArgs.push_back(Args.MakeArgString(P)); continue; } else { // Ignore the PCH if not first on command line and emit warning. D.Diag(diag::warn_drv_pch_not_first_include) - << P.str() << A->getAsString(Args); + << P << A->getAsString(Args); } } } @@ -489,6 +497,7 @@ static bool isSignedCharDefault(const llvm::Triple &Triple) { return true; return false; + case llvm::Triple::hexagon: case llvm::Triple::ppc64le: case llvm::Triple::systemz: case llvm::Triple::xcore: @@ -536,75 +545,88 @@ static void getARMFPUFeatures(const Driver &D, const Arg *A, std::vector<const char *> &Features) { StringRef FPU = A->getValue(); - // Set the target features based on the FPU. - if (FPU == "fpa" || FPU == "fpe2" || FPU == "fpe3" || FPU == "maverick") { - // Disable any default FPU support. + // FIXME: Why does "none" disable more than "invalid"? + if (FPU == "none") { Features.push_back("-vfp2"); Features.push_back("-vfp3"); + Features.push_back("-vfp4"); + Features.push_back("-fp-armv8"); + Features.push_back("-crypto"); Features.push_back("-neon"); - } else if (FPU == "vfp") { + return; + } + + // FIXME: Make sure we differentiate sp-only. + if (FPU.find("-sp-") != StringRef::npos) { + Features.push_back("+fp-only-sp"); + } + + // All other FPU types, valid or invalid. 
+ switch(llvm::ARMTargetParser::parseFPU(FPU)) { + case llvm::ARM::FK_INVALID: + case llvm::ARM::FK_SOFTVFP: + Features.push_back("-vfp2"); + Features.push_back("-vfp3"); + Features.push_back("-neon"); + break; + case llvm::ARM::FK_VFP: + case llvm::ARM::FK_VFPV2: Features.push_back("+vfp2"); Features.push_back("-neon"); - } else if (FPU == "vfp3-d16" || FPU == "vfpv3-d16") { - Features.push_back("+vfp3"); + break; + case llvm::ARM::FK_VFPV3_D16: Features.push_back("+d16"); - Features.push_back("-neon"); - } else if (FPU == "vfp3" || FPU == "vfpv3") { + // fall-through + case llvm::ARM::FK_VFPV3: Features.push_back("+vfp3"); Features.push_back("-neon"); - } else if (FPU == "vfp4-d16" || FPU == "vfpv4-d16") { - Features.push_back("+vfp4"); + break; + case llvm::ARM::FK_VFPV4_D16: Features.push_back("+d16"); - Features.push_back("-neon"); - } else if (FPU == "vfp4" || FPU == "vfpv4") { + // fall-through + case llvm::ARM::FK_VFPV4: Features.push_back("+vfp4"); Features.push_back("-neon"); - } else if (FPU == "fp4-sp-d16" || FPU == "fpv4-sp-d16") { - Features.push_back("+vfp4"); - Features.push_back("+d16"); - Features.push_back("+fp-only-sp"); - Features.push_back("-neon"); - } else if (FPU == "fp5-sp-d16" || FPU == "fpv5-sp-d16") { - Features.push_back("+fp-armv8"); - Features.push_back("+fp-only-sp"); + break; + case llvm::ARM::FK_FPV5_D16: Features.push_back("+d16"); - Features.push_back("-neon"); - Features.push_back("-crypto"); - } else if (FPU == "fp5-dp-d16" || FPU == "fpv5-dp-d16" || - FPU == "fp5-d16" || FPU == "fpv5-d16") { + // fall-through + case llvm::ARM::FK_FP_ARMV8: Features.push_back("+fp-armv8"); - Features.push_back("+d16"); Features.push_back("-neon"); Features.push_back("-crypto"); - } else if (FPU == "fp-armv8") { - Features.push_back("+fp-armv8"); - Features.push_back("-neon"); - Features.push_back("-crypto"); - } else if (FPU == "neon-fp-armv8") { + break; + case llvm::ARM::FK_NEON_FP_ARMV8: Features.push_back("+fp-armv8"); Features.push_back("+neon"); Features.push_back("-crypto"); - } else if (FPU == "crypto-neon-fp-armv8") { + break; + case llvm::ARM::FK_CRYPTO_NEON_FP_ARMV8: Features.push_back("+fp-armv8"); Features.push_back("+neon"); Features.push_back("+crypto"); - } else if (FPU == "neon") { - Features.push_back("+neon"); - } else if (FPU == "neon-vfpv3") { - Features.push_back("+vfp3"); + break; + case llvm::ARM::FK_NEON: Features.push_back("+neon"); - } else if (FPU == "neon-vfpv4") { + break; + case llvm::ARM::FK_NEON_VFPV4: Features.push_back("+neon"); Features.push_back("+vfp4"); - } else if (FPU == "none") { - Features.push_back("-vfp2"); - Features.push_back("-vfp3"); - Features.push_back("-vfp4"); - Features.push_back("-fp-armv8"); - Features.push_back("-crypto"); - Features.push_back("-neon"); - } else + break; + default: D.Diag(diag::err_drv_clang_unsupported) << A->getAsString(Args); + } +} + +static int getARMSubArchVersionNumber(const llvm::Triple &Triple) { + llvm::StringRef Arch = Triple.getArchName(); + return llvm::ARMTargetParser::parseArchVersion(Arch); +} + +static bool isARMMProfile(const llvm::Triple &Triple) { + llvm::StringRef Arch = Triple.getArchName(); + unsigned Profile = llvm::ARMTargetParser::parseArchProfile(Arch); + return Profile == llvm::ARM::PK_M; } // Select the float ABI as determined by -msoft-float, -mhard-float, and @@ -637,11 +659,8 @@ StringRef tools::arm::getARMFloatABI(const Driver &D, const ArgList &Args, case llvm::Triple::IOS: { // Darwin defaults to "softfp" for v6 and v7. 
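The rewritten FPU switch above deliberately uses case fall-through: the _D16 variants push "+d16" and then share the base FPU's feature list. A condensed standalone version of that shape:

#include <cstdio>
#include <vector>

enum FPUKind { VFPV3_D16, VFPV3, NEON };

std::vector<const char *> fpuFeatures(FPUKind K) {
  std::vector<const char *> F;
  switch (K) {
  case VFPV3_D16:
    F.push_back("+d16");
    // fall-through: d16 variants share the base feature list
  case VFPV3:
    F.push_back("+vfp3");
    F.push_back("-neon");
    break;
  case NEON:
    F.push_back("+neon");
    break;
  }
  return F;
}

int main() {
  for (const char *S : fpuFeatures(VFPV3_D16))
    std::printf("%s ", S); // +d16 +vfp3 -neon
  std::printf("\n");
}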
// - // FIXME: Factor out an ARM class so we can cache the arch somewhere. - std::string ArchName = - arm::getLLVMArchSuffixForARM(arm::getARMTargetCPU(Args, Triple)); - if (StringRef(ArchName).startswith("v6") || - StringRef(ArchName).startswith("v7")) + if (getARMSubArchVersionNumber(Triple) == 6 || + getARMSubArchVersionNumber(Triple) == 7) FloatABI = "softfp"; else FloatABI = "soft"; @@ -681,9 +700,7 @@ StringRef tools::arm::getARMFloatABI(const Driver &D, const ArgList &Args, FloatABI = "softfp"; break; case llvm::Triple::Android: { - std::string ArchName = - arm::getLLVMArchSuffixForARM(arm::getARMTargetCPU(Args, Triple)); - if (StringRef(ArchName).startswith("v7")) + if (getARMSubArchVersionNumber(Triple) == 7) FloatABI = "softfp"; else FloatABI = "soft"; @@ -736,6 +753,25 @@ static void getARMTargetFeatures(const Driver &D, const llvm::Triple &Triple, if (const Arg *A = Args.getLastArg(options::OPT_mhwdiv_EQ)) getARMHWDivFeatures(D, A, Args, Features); + // Check if -march is valid by checking if it can be canonicalised. getARMArch + // is used here instead of just checking the -march value in order to handle + // -march=native correctly. + if (const Arg *A = Args.getLastArg(options::OPT_march_EQ)) { + StringRef Arch = arm::getARMArch(Args, Triple); + if (llvm::ARMTargetParser::getCanonicalArchName(Arch).empty()) + D.Diag(diag::err_drv_clang_unsupported) << A->getAsString(Args); + } + + // We do a similar thing with -mcpu, but here things are complicated because + // the only function we have to check if a cpu is valid is + // getLLVMArchSuffixForARM which also needs an architecture. + if (const Arg *A = Args.getLastArg(options::OPT_mcpu_EQ)) { + StringRef CPU = arm::getARMTargetCPU(Args, Triple); + StringRef Arch = arm::getARMArch(Args, Triple); + if (strcmp(arm::getLLVMArchSuffixForARM(CPU, Arch), "") == 0) + D.Diag(diag::err_drv_clang_unsupported) << A->getAsString(Args); + } + // Setting -msoft-float effectively disables NEON because of the GCC // implementation, although the same isn't true of VFP or VFP3. if (FloatABI == "soft") { @@ -744,14 +780,17 @@ static void getARMTargetFeatures(const Driver &D, const llvm::Triple &Triple, Features.push_back("-crypto"); } - // En/disable crc - if (Arg *A = Args.getLastArg(options::OPT_mcrc, - options::OPT_mnocrc)) { + // En/disable crc code generation. + if (Arg *A = Args.getLastArg(options::OPT_mcrc, options::OPT_mnocrc)) { if (A->getOption().matches(options::OPT_mcrc)) Features.push_back("+crc"); else Features.push_back("-crc"); } + + if (Triple.getSubArch() == llvm::Triple::SubArchType::ARMSubArch_v8_1a) { + Features.insert(Features.begin(), "+v8.1a"); + } } void Clang::AddARMTargetArgs(const ArgList &Args, @@ -761,7 +800,6 @@ void Clang::AddARMTargetArgs(const ArgList &Args, // Get the effective triple, which takes into account the deployment target. std::string TripleStr = getToolChain().ComputeEffectiveClangTriple(Args); llvm::Triple Triple(TripleStr); - std::string CPUName = arm::getARMTargetCPU(Args, Triple); // Select the ABI to use. // @@ -775,7 +813,7 @@ void Clang::AddARMTargetArgs(const ArgList &Args, // the frontend matches that. if (Triple.getEnvironment() == llvm::Triple::EABI || Triple.getOS() == llvm::Triple::UnknownOS || - StringRef(CPUName).startswith("cortex-m")) { + isARMMProfile(Triple)) { ABIName = "aapcs"; } else { ABIName = "apcs-gnu"; @@ -856,12 +894,14 @@ void Clang::AddARMTargetArgs(const ArgList &Args, } } - // Setting -mno-global-merge disables the codegen global merge pass. 
Setting - // -mglobal-merge has no effect as the pass is enabled by default. + // Forward the -mglobal-merge option for explicit control over the pass. if (Arg *A = Args.getLastArg(options::OPT_mglobal_merge, options::OPT_mno_global_merge)) { + CmdArgs.push_back("-backend-option"); if (A->getOption().matches(options::OPT_mno_global_merge)) - CmdArgs.push_back("-mno-global-merge"); + CmdArgs.push_back("-arm-global-merge=false"); + else + CmdArgs.push_back("-arm-global-merge=true"); } if (!Args.hasFlag(options::OPT_mimplicit_float, @@ -952,12 +992,14 @@ void Clang::AddAArch64TargetArgs(const ArgList &Args, CmdArgs.push_back("-aarch64-fix-cortex-a53-835769=1"); } - // Setting -mno-global-merge disables the codegen global merge pass. Setting - // -mglobal-merge has no effect as the pass is enabled by default. + // Forward the -mglobal-merge option for explicit control over the pass. if (Arg *A = Args.getLastArg(options::OPT_mglobal_merge, options::OPT_mno_global_merge)) { + CmdArgs.push_back("-backend-option"); if (A->getOption().matches(options::OPT_mno_global_merge)) - CmdArgs.push_back("-mno-global-merge"); + CmdArgs.push_back("-aarch64-global-merge=false"); + else + CmdArgs.push_back("-aarch64-global-merge=true"); } if (Args.hasArg(options::OPT_ffixed_x18)) { @@ -1096,17 +1138,6 @@ static void getMIPSTargetFeatures(const Driver &D, const llvm::Triple &Triple, mips::getMipsCPUAndABI(Args, Triple, CPUName, ABIName); ABIName = getGnuCompatibleMipsABIName(ABIName); - // Always override the backend's default ABI. - std::string ABIFeature = llvm::StringSwitch<StringRef>(ABIName) - .Case("32", "+o32") - .Case("n32", "+n32") - .Case("64", "+n64") - .Case("eabi", "+eabi") - .Default(("+" + ABIName).str()); - Features.push_back("-o32"); - Features.push_back("-n64"); - Features.push_back(Args.MakeArgString(ABIFeature)); - AddTargetFeature(Args, Features, options::OPT_mno_abicalls, options::OPT_mabicalls, "noabicalls"); @@ -1120,11 +1151,21 @@ static void getMIPSTargetFeatures(const Driver &D, const llvm::Triple &Triple, if (Arg *A = Args.getLastArg(options::OPT_mnan_EQ)) { StringRef Val = StringRef(A->getValue()); - if (Val == "2008") - Features.push_back("+nan2008"); - else if (Val == "legacy") - Features.push_back("-nan2008"); - else + if (Val == "2008") { + if (mips::getSupportedNanEncoding(CPUName) & mips::Nan2008) + Features.push_back("+nan2008"); + else { + Features.push_back("-nan2008"); + D.Diag(diag::warn_target_unsupported_nan2008) << CPUName; + } + } else if (Val == "legacy") { + if (mips::getSupportedNanEncoding(CPUName) & mips::NanLegacy) + Features.push_back("-nan2008"); + else { + Features.push_back("+nan2008"); + D.Diag(diag::warn_target_unsupported_nanlegacy) << CPUName; + } + } else D.Diag(diag::err_drv_unsupported_option_argument) << A->getOption().getName() << Val; } @@ -1322,9 +1363,22 @@ void Clang::AddPPCTargetArgs(const ArgList &Args, ABIName = A->getValue(); } else if (getToolChain().getTriple().isOSLinux()) switch(getToolChain().getArch()) { - case llvm::Triple::ppc64: + case llvm::Triple::ppc64: { + // When targeting a processor that supports QPX, or if QPX is + // specifically enabled, default to using the ABI that supports QPX (so + // long as it is not specifically disabled). 
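The -mnan handling above no longer trusts the flag blindly: the requested encoding is checked against what the CPU supports, and an unsupported request flips to the other encoding with a warning. Flattened into a pure function:

#include <cstdio>

enum NanKind { NanLegacy = 1, Nan2008 = 2 };

const char *nanFeature(int Supported, bool Want2008, bool &Warn) {
  Warn = false;
  if (Want2008) {
    if (Supported & Nan2008)
      return "+nan2008";
    Warn = true; // warn_target_unsupported_nan2008
    return "-nan2008";
  }
  if (Supported & NanLegacy)
    return "-nan2008";
  Warn = true;   // warn_target_unsupported_nanlegacy
  return "+nan2008";
}

int main() {
  bool W;
  std::printf("%s %d\n", nanFeature(NanLegacy, true, W), W); // -nan2008 1
}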
+ bool HasQPX = false; + if (Arg *A = Args.getLastArg(options::OPT_mcpu_EQ)) + HasQPX = A->getValue() == StringRef("a2q"); + HasQPX = Args.hasFlag(options::OPT_mqpx, options::OPT_mno_qpx, HasQPX); + if (HasQPX) { + ABIName = "elfv1-qpx"; + break; + } + ABIName = "elfv1"; break; + } case llvm::Triple::ppc64le: ABIName = "elfv2"; break; @@ -1410,6 +1464,26 @@ static const char *getSystemZTargetCPU(const ArgList &Args) { return "z10"; } +static void getSystemZTargetFeatures(const ArgList &Args, + std::vector<const char *> &Features) { + // -m(no-)htm overrides use of the transactional-execution facility. + if (Arg *A = Args.getLastArg(options::OPT_mhtm, + options::OPT_mno_htm)) { + if (A->getOption().matches(options::OPT_mhtm)) + Features.push_back("+transactional-execution"); + else + Features.push_back("-transactional-execution"); + } + // -m(no-)vx overrides use of the vector facility. + if (Arg *A = Args.getLastArg(options::OPT_mvx, + options::OPT_mno_vx)) { + if (A->getOption().matches(options::OPT_mvx)) + Features.push_back("+vector"); + else + Features.push_back("-vector"); + } +} + static const char *getX86TargetCPU(const ArgList &Args, const llvm::Triple &Triple) { if (const Arg *A = Args.getLastArg(options::OPT_march_EQ)) { @@ -1445,6 +1519,10 @@ static const char *getX86TargetCPU(const ArgList &Args, return Is64Bit ? "core2" : "yonah"; } + // Set up default CPU name for PS4 compilers. + if (Triple.isPS4CPU()) + return "btver2"; + // On Android use targets compatible with gcc if (Triple.getEnvironment() == llvm::Triple::Android) return Is64Bit ? "x86-64" : "i686"; @@ -1512,6 +1590,7 @@ static std::string getCPUName(const ArgList &Args, const llvm::Triple &T) { } case llvm::Triple::sparc: + case llvm::Triple::sparcel: case llvm::Triple::sparcv9: if (const Arg *A = Args.getLastArg(options::OPT_mcpu_EQ)) return A->getValue(); @@ -1551,10 +1630,20 @@ static void AddGoldPlugin(const ToolChain &ToolChain, const ArgList &Args, CmdArgs.push_back(Args.MakeArgString(Twine("-plugin-opt=mcpu=") + CPU)); } -static void getX86TargetFeatures(const Driver & D, - const llvm::Triple &Triple, +static void getX86TargetFeatures(const Driver &D, const llvm::Triple &Triple, const ArgList &Args, std::vector<const char *> &Features) { + // If -march=native, autodetect the feature list. + if (const Arg *A = Args.getLastArg(options::OPT_march_EQ)) { + if (StringRef(A->getValue()) == "native") { + llvm::StringMap<bool> HostFeatures; + if (llvm::sys::getHostCPUFeatures(HostFeatures)) + for (auto &F : HostFeatures) + Features.push_back(Args.MakeArgString((F.second ? "+" : "-") + + F.first())); + } + } + if (Triple.getArchName() == "x86_64h") { // x86_64h implies quite a few of the more modern subtarget features // for Haswell class CPUs, but not all of them. Opt-out of a few. @@ -1566,7 +1655,7 @@ static void getX86TargetFeatures(const Driver & D, Features.push_back("-fsgsbase"); } - // Add features to comply with gcc on Android + // Add features to be compatible with gcc for Android. if (Triple.getEnvironment() == llvm::Triple::Android) { if (Triple.getArch() == llvm::Triple::x86_64) { Features.push_back("+sse4.2"); @@ -1575,7 +1664,7 @@ static void getX86TargetFeatures(const Driver & D, Features.push_back("+ssse3"); } - // Set features according to the -arch flag on MSVC + // Set features according to the -arch flag on MSVC. 
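The new -march=native path above converts every detected host capability into an explicit plus/minus feature so the backend sees the full set rather than a CPU name alone. A sketch of that conversion with the host query replaced by a fixed map (the real code uses llvm::sys::getHostCPUFeatures):

#include <iostream>
#include <map>
#include <string>
#include <vector>

std::vector<std::string>
nativeFeatures(const std::map<std::string, bool> &Host) {
  std::vector<std::string> Features;
  for (const auto &F : Host)
    Features.push_back((F.second ? "+" : "-") + F.first);
  return Features;
}

int main() {
  for (const auto &S : nativeFeatures({{"avx", true}, {"sse4.2", false}}))
    std::cout << S << "\n"; // +avx then -sse4.2
}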
if (Arg *A = Args.getLastArg(options::OPT__SLASH_arch)) { StringRef Arch = A->getValue(); bool ArchUsed = false; @@ -1656,39 +1745,16 @@ void Clang::AddX86TargetArgs(const ArgList &Args, } } -static inline bool HasPICArg(const ArgList &Args) { - return Args.hasArg(options::OPT_fPIC) - || Args.hasArg(options::OPT_fpic); -} - -static Arg *GetLastSmallDataThresholdArg(const ArgList &Args) { - return Args.getLastArg(options::OPT_G, - options::OPT_G_EQ, - options::OPT_msmall_data_threshold_EQ); -} - -static std::string GetHexagonSmallDataThresholdValue(const ArgList &Args) { - std::string value; - if (HasPICArg(Args)) - value = "0"; - else if (Arg *A = GetLastSmallDataThresholdArg(Args)) { - value = A->getValue(); - A->claim(); - } - return value; -} - void Clang::AddHexagonTargetArgs(const ArgList &Args, ArgStringList &CmdArgs) const { - CmdArgs.push_back("-fno-signed-char"); CmdArgs.push_back("-mqdsp6-compat"); CmdArgs.push_back("-Wreturn-type"); - std::string SmallDataThreshold = GetHexagonSmallDataThresholdValue(Args); - if (!SmallDataThreshold.empty()) { + if (const char* v = toolchains::Hexagon_TC::GetSmallDataThreshold(Args)) { + std::string SmallDataThreshold="-hexagon-small-data-threshold="; + SmallDataThreshold += v; CmdArgs.push_back ("-mllvm"); - CmdArgs.push_back(Args.MakeArgString( - "-hexagon-small-data-threshold=" + SmallDataThreshold)); + CmdArgs.push_back(Args.MakeArgString(SmallDataThreshold)); } if (!Args.hasArg(options::OPT_fno_short_enums)) @@ -1734,7 +1800,7 @@ static bool DecodeAArch64Mcpu(const Driver &D, StringRef Mcpu, StringRef &CPU, std::vector<const char *> &Features) { std::pair<StringRef, StringRef> Split = Mcpu.split("+"); CPU = Split.first; - if (CPU == "cyclone" || CPU == "cortex-a53" || CPU == "cortex-a57") { + if (CPU == "cyclone" || CPU == "cortex-a53" || CPU == "cortex-a57" || CPU == "cortex-a72") { Features.push_back("+neon"); Features.push_back("+crc"); Features.push_back("+crypto"); @@ -1755,8 +1821,17 @@ getAArch64ArchFeaturesFromMarch(const Driver &D, StringRef March, const ArgList &Args, std::vector<const char *> &Features) { std::pair<StringRef, StringRef> Split = March.split("+"); - if (Split.first != "armv8-a") + + if (Split.first == "armv8-a" || + Split.first == "armv8a") { + // ok, no additional features. + } else if ( + Split.first == "armv8.1-a" || + Split.first == "armv8.1a" ) { + Features.push_back("+v8.1a"); + } else { return false; + } if (Split.second.size() && !DecodeAArch64Features(D, Split.second, Features)) return false; @@ -1871,9 +1946,13 @@ static void getTargetFeatures(const Driver &D, const llvm::Triple &Triple, getPPCTargetFeatures(Args, Features); break; case llvm::Triple::sparc: + case llvm::Triple::sparcel: case llvm::Triple::sparcv9: getSparcTargetFeatures(Args, Features); break; + case llvm::Triple::systemz: + getSystemZTargetFeatures(Args, Features); + break; case llvm::Triple::aarch64: case llvm::Triple::aarch64_be: getAArch64TargetFeatures(D, Args, Features); @@ -1934,16 +2013,17 @@ static bool exceptionSettings(const ArgList &Args, const llvm::Triple &Triple) { return false; } -/// addExceptionArgs - Adds exception related arguments to the driver command -/// arguments. There's a master flag, -fexceptions and also language specific -/// flags to enable/disable C++ and Objective-C exceptions. -/// This makes it possible to for example disable C++ exceptions but enable -/// Objective-C exceptions. +/// Adds exception related arguments to the driver command arguments. 
There's a +/// master flag, -fexceptions and also language specific flags to enable/disable +/// C++ and Objective-C exceptions. This makes it possible to for example +/// disable C++ exceptions but enable Objective-C exceptions. static void addExceptionArgs(const ArgList &Args, types::ID InputType, - const llvm::Triple &Triple, - bool KernelOrKext, + const ToolChain &TC, bool KernelOrKext, const ObjCRuntime &objcRuntime, ArgStringList &CmdArgs) { + const Driver &D = TC.getDriver(); + const llvm::Triple &Triple = TC.getTriple(); + if (KernelOrKext) { // -mkernel and -fapple-kext imply no exceptions, so claim exception related // arguments now to avoid warnings about unused arguments. @@ -1971,16 +2051,32 @@ static void addExceptionArgs(const ArgList &Args, types::ID InputType, } if (types::isCXX(InputType)) { - bool CXXExceptionsEnabled = Triple.getArch() != llvm::Triple::xcore; - if (Arg *A = Args.getLastArg(options::OPT_fcxx_exceptions, - options::OPT_fno_cxx_exceptions, - options::OPT_fexceptions, - options::OPT_fno_exceptions)) + bool CXXExceptionsEnabled = + Triple.getArch() != llvm::Triple::xcore && !Triple.isPS4CPU(); + Arg *ExceptionArg = Args.getLastArg( + options::OPT_fcxx_exceptions, options::OPT_fno_cxx_exceptions, + options::OPT_fexceptions, options::OPT_fno_exceptions); + if (ExceptionArg) CXXExceptionsEnabled = - A->getOption().matches(options::OPT_fcxx_exceptions) || - A->getOption().matches(options::OPT_fexceptions); + ExceptionArg->getOption().matches(options::OPT_fcxx_exceptions) || + ExceptionArg->getOption().matches(options::OPT_fexceptions); if (CXXExceptionsEnabled) { + if (Triple.isPS4CPU()) { + ToolChain::RTTIMode RTTIMode = TC.getRTTIMode(); + assert(ExceptionArg && + "On the PS4 exceptions should only be enabled if passing " + "an argument"); + if (RTTIMode == ToolChain::RM_DisabledExplicitly) { + const Arg *RTTIArg = TC.getRTTIArg(); + assert(RTTIArg && "RTTI disabled explicitly but no RTTIArg!"); + D.Diag(diag::err_drv_argument_not_allowed_with) + << RTTIArg->getAsString(Args) << ExceptionArg->getAsString(Args); + } else if (RTTIMode == ToolChain::RM_EnabledImplicitly) + D.Diag(diag::warn_drv_enabling_rtti_with_exceptions); + } else + assert(TC.getRTTIMode() != ToolChain::RM_DisabledImplicitly); + CmdArgs.push_back("-fcxx-exceptions"); EH = true; @@ -2135,8 +2231,11 @@ static SmallString<128> getCompilerRTLibDir(const ToolChain &TC) { } static SmallString<128> getCompilerRT(const ToolChain &TC, StringRef Component, - bool Shared = false, - const char *Env = "") { + bool Shared = false) { + const char *Env = TC.getTriple().getEnvironment() == llvm::Triple::Android + ? "-android" + : ""; + bool IsOSWindows = TC.getTriple().isOSWindows(); StringRef Arch = getArchNameForCompilerRTLib(TC); const char *Prefix = IsOSWindows ? "" : "lib"; @@ -2171,6 +2270,7 @@ static void addProfileRT(const ToolChain &TC, const ArgList &Args, false) || Args.hasArg(options::OPT_fprofile_generate) || Args.hasArg(options::OPT_fprofile_instr_generate) || + Args.hasArg(options::OPT_fprofile_instr_generate_EQ) || Args.hasArg(options::OPT_fcreate_profile) || Args.hasArg(options::OPT_coverage))) return; @@ -2181,16 +2281,11 @@ static void addProfileRT(const ToolChain &TC, const ArgList &Args, static void addSanitizerRuntime(const ToolChain &TC, const ArgList &Args, ArgStringList &CmdArgs, StringRef Sanitizer, bool IsShared) { - const char *Env = TC.getTriple().getEnvironment() == llvm::Triple::Android - ? 
"-android" - : ""; - // Static runtimes must be forced into executable, so we wrap them in // whole-archive. if (!IsShared) CmdArgs.push_back("-whole-archive"); - CmdArgs.push_back(Args.MakeArgString(getCompilerRT(TC, Sanitizer, IsShared, - Env))); + CmdArgs.push_back(Args.MakeArgString(getCompilerRT(TC, Sanitizer, IsShared))); if (!IsShared) CmdArgs.push_back("-no-whole-archive"); } @@ -2251,19 +2346,20 @@ collectSanitizerRuntimes(const ToolChain &TC, const ArgList &Args, StaticRuntimes.push_back("dfsan"); if (SanArgs.needsLsanRt()) StaticRuntimes.push_back("lsan"); - if (SanArgs.needsMsanRt()) + if (SanArgs.needsMsanRt()) { StaticRuntimes.push_back("msan"); - if (SanArgs.needsTsanRt()) + if (SanArgs.linkCXXRuntimes()) + StaticRuntimes.push_back("msan_cxx"); + } + if (SanArgs.needsTsanRt()) { StaticRuntimes.push_back("tsan"); - // WARNING: UBSan should always go last. + if (SanArgs.linkCXXRuntimes()) + StaticRuntimes.push_back("tsan_cxx"); + } if (SanArgs.needsUbsanRt()) { - // If UBSan is not combined with another sanitizer, we need to pull in - // sanitizer_common explicitly. - if (StaticRuntimes.empty()) - HelperStaticRuntimes.push_back("san"); - StaticRuntimes.push_back("ubsan"); + StaticRuntimes.push_back("ubsan_standalone"); if (SanArgs.linkCXXRuntimes()) - StaticRuntimes.push_back("ubsan_cxx"); + StaticRuntimes.push_back("ubsan_standalone_cxx"); } } @@ -2291,27 +2387,49 @@ static bool addSanitizerRuntimes(const ToolChain &TC, const ArgList &Args, return !StaticRuntimes.empty(); } +static bool areOptimizationsEnabled(const ArgList &Args) { + // Find the last -O arg and see if it is non-zero. + if (Arg *A = Args.getLastArg(options::OPT_O_Group)) + return !A->getOption().matches(options::OPT_O0); + // Defaults to -O0. + return false; +} + static bool shouldUseFramePointerForTarget(const ArgList &Args, const llvm::Triple &Triple) { - switch (Triple.getArch()) { - // Don't use a frame pointer on linux if optimizing for certain targets. - case llvm::Triple::mips64: - case llvm::Triple::mips64el: - case llvm::Triple::mips: - case llvm::Triple::mipsel: - case llvm::Triple::systemz: - case llvm::Triple::x86: - case llvm::Triple::x86_64: - if (Triple.isOSLinux()) - if (Arg *A = Args.getLastArg(options::OPT_O_Group)) - if (!A->getOption().matches(options::OPT_O0)) - return false; - return true; - case llvm::Triple::xcore: + // XCore never wants frame pointers, regardless of OS. + if (Triple.getArch() == llvm::Triple::xcore) { return false; - default: - return true; } + + if (Triple.isOSLinux()) { + switch (Triple.getArch()) { + // Don't use a frame pointer on linux if optimizing for certain targets. + case llvm::Triple::mips64: + case llvm::Triple::mips64el: + case llvm::Triple::mips: + case llvm::Triple::mipsel: + case llvm::Triple::systemz: + case llvm::Triple::x86: + case llvm::Triple::x86_64: + return !areOptimizationsEnabled(Args); + default: + return true; + } + } + + if (Triple.isOSWindows()) { + switch (Triple.getArch()) { + case llvm::Triple::x86: + return !areOptimizationsEnabled(Args); + default: + // All other supported Windows ISAs use xdata unwind information, so frame + // pointers are not generally useful. 
+ return false; + } + } + + return true; } static bool shouldUseFramePointer(const ArgList &Args, @@ -2329,6 +2447,9 @@ static bool shouldUseLeafFramePointer(const ArgList &Args, options::OPT_momit_leaf_frame_pointer)) return A->getOption().matches(options::OPT_mno_omit_leaf_frame_pointer); + if (Triple.isPS4CPU()) + return false; + return shouldUseFramePointerForTarget(Args, Triple); } @@ -2342,7 +2463,7 @@ static void addDebugCompDirArg(const ArgList &Args, ArgStringList &CmdArgs) { } static const char *SplitDebugName(const ArgList &Args, - const InputInfoList &Inputs) { + const InputInfo &Input) { Arg *FinalOutput = Args.getLastArg(options::OPT_o); if (FinalOutput && Args.hasArg(options::OPT_c)) { SmallString<128> T(FinalOutput->getValue()); @@ -2352,7 +2473,7 @@ static const char *SplitDebugName(const ArgList &Args, // Use the compilation dir. SmallString<128> T( Args.getLastArgValue(options::OPT_fdebug_compilation_dir)); - SmallString<128> F(llvm::sys::path::stem(Inputs[0].getBaseInput())); + SmallString<128> F(llvm::sys::path::stem(Input.getBaseInput())); llvm::sys::path::replace_extension(F, "dwo"); T += F; return Args.MakeArgString(F); @@ -2431,24 +2552,17 @@ static void addDashXForInput(const ArgList &Args, const InputInfo &Input, CmdArgs.push_back(types::getTypeName(Input.getType())); } -static std::string getMSCompatibilityVersion(const char *VersionStr) { - unsigned Version; - if (StringRef(VersionStr).getAsInteger(10, Version)) - return "0"; - +static VersionTuple getMSCompatibilityVersion(unsigned Version) { if (Version < 100) - return llvm::utostr_32(Version) + ".0"; + return VersionTuple(Version); if (Version < 10000) - return llvm::utostr_32(Version / 100) + "." + - llvm::utostr_32(Version % 100); + return VersionTuple(Version / 100, Version % 100); unsigned Build = 0, Factor = 1; for ( ; Version > 10000; Version = Version / 10, Factor = Factor * 10) Build = Build + (Version % 10) * Factor; - return llvm::utostr_32(Version / 100) + "." + - llvm::utostr_32(Version % 100) + "." + - llvm::utostr_32(Build); + return VersionTuple(Version / 100, Version % 100, Build); } // Claim options we don't want to warn if they are unused. We do this for @@ -2461,6 +2575,38 @@ static void claimNoWarnArgs(const ArgList &Args) { Args.ClaimAllArgs(options::OPT_fno_lto); } +static void appendUserToPath(SmallVectorImpl<char> &Result) { +#ifdef LLVM_ON_UNIX + const char *Username = getenv("LOGNAME"); +#else + const char *Username = getenv("USERNAME"); +#endif + if (Username) { + // Validate that LoginName can be used in a path, and get its length. + size_t Len = 0; + for (const char *P = Username; *P; ++P, ++Len) { + if (!isAlphanumeric(*P) && *P != '_') { + Username = nullptr; + break; + } + } + + if (Username && Len > 0) { + Result.append(Username, Username + Len); + return; + } + } + + // Fallback to user id. +#ifdef LLVM_ON_UNIX + std::string UID = llvm::utostr(getuid()); +#else + // FIXME: Windows seems to have an 'SID' that might work. + std::string UID = "9999"; +#endif + Result.append(UID.begin(), UID.end()); +} + void Clang::ConstructJob(Compilation &C, const JobAction &JA, const InputInfo &Output, const InputInfoList &Inputs, @@ -2477,6 +2623,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, bool IsWindowsMSVC = getToolChain().getTriple().isWindowsMSVCEnvironment(); assert(Inputs.size() == 1 && "Unable to handle multiple inputs."); + const InputInfo &Input = Inputs[0]; // Invoke ourselves in -cc1 mode. 
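The digit-peeling loop in getMSCompatibilityVersion above deserves a worked example: an -fmsc-version value longer than four digits carries a build number in its low-order digits, which the loop strips off one digit at a time while reassembling it. A self-contained check (std::tuple stands in for clang's VersionTuple):

#include <cassert>
#include <tuple>

std::tuple<unsigned, unsigned, unsigned> msVersion(unsigned Version) {
  if (Version < 100)
    return {Version, 0, 0};
  if (Version < 10000)
    return {Version / 100, Version % 100, 0};
  unsigned Build = 0, Factor = 1;
  for (; Version > 10000; Version /= 10, Factor *= 10)
    Build += (Version % 10) * Factor;
  return {Version / 100, Version % 100, Build};
}

int main() {
  assert(msVersion(1800) == std::make_tuple(18u, 0u, 0u));
  // 170051025: the loop peels "51025" off, leaving "1700" -> 17.0.51025.
  assert(msVersion(170051025) == std::make_tuple(17u, 0u, 51025u));
}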
// @@ -2568,6 +2715,13 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, assert(JA.getType() == types::TY_PP_Asm && "Unexpected output type!"); } + + // Preserve use-list order by default when emitting bitcode, so that + // loading the bitcode up in 'opt' or 'llc' and running passes gives the + // same result as running passes here. For LTO, we don't need to preserve + // the use-list order, since serialization to bitcode is part of the flow. + if (JA.getType() == types::TY_LLVM_BC) + CmdArgs.push_back("-emit-llvm-uselists"); } // We normally speed up the clang process a bit by skipping destructors at @@ -2584,7 +2738,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, // Set the main file name, so that debug info works even with // -save-temps. CmdArgs.push_back("-main-file-name"); - CmdArgs.push_back(getBaseInputName(Args, Inputs)); + CmdArgs.push_back(getBaseInputName(Args, Input)); // Some flags which affect the language (via preprocessor // defines). @@ -2612,7 +2766,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, CmdArgs.push_back("-analyzer-checker=deadcode"); - if (types::isCXX(Inputs[0].getType())) + if (types::isCXX(Input.getType())) CmdArgs.push_back("-analyzer-checker=cplusplus"); // Enable the following experimental checkers for testing. @@ -2680,6 +2834,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, case llvm::Triple::mips64: case llvm::Triple::mips64el: case llvm::Triple::sparc: + case llvm::Triple::sparcel: case llvm::Triple::x86: case llvm::Triple::x86_64: IsPICLevelTwo = false; // "-fpie" @@ -2781,6 +2936,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, else CmdArgs.push_back(Args.MakeArgString(getToolChain().getThreadModel())); + Args.AddLastArg(CmdArgs, options::OPT_fveclib); + if (!Args.hasFlag(options::OPT_fmerge_all_constants, options::OPT_fno_merge_all_constants)) CmdArgs.push_back("-fno-merge-all-constants"); @@ -2961,6 +3118,11 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, !TrappingMath) CmdArgs.push_back("-menable-unsafe-fp-math"); + if (!SignedZeros) + CmdArgs.push_back("-fno-signed-zeros"); + + if (ReciprocalMath) + CmdArgs.push_back("-freciprocal-math"); // Validate and pass through -fp-contract option. if (Arg *A = Args.getLastArg(options::OPT_ffast_math, FastMathAliasOption, @@ -3060,9 +3222,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, } // Add the target cpu - std::string ETripleStr = getToolChain().ComputeEffectiveClangTriple(Args); - llvm::Triple ETriple(ETripleStr); - std::string CPU = getCPUName(Args, ETriple); + std::string CPU = getCPUName(Args, Triple); if (!CPU.empty()) { CmdArgs.push_back("-target-cpu"); CmdArgs.push_back(Args.MakeArgString(CPU)); @@ -3074,7 +3234,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, } // Add the target features - getTargetFeatures(D, ETriple, Args, CmdArgs, false); + getTargetFeatures(D, Triple, Args, CmdArgs, false); // Add target specific flags. switch(getToolChain().getArch()) { @@ -3107,6 +3267,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, break; case llvm::Triple::sparc: + case llvm::Triple::sparcel: case llvm::Triple::sparcv9: AddSparcTargetArgs(Args, CmdArgs); break; @@ -3136,7 +3297,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, // Explicitly error on some things we know we don't support and can't just // ignore. 
- types::ID InputType = Inputs[0].getType(); + types::ID InputType = Input.getType(); if (!Args.hasArg(options::OPT_fallow_unsupported)) { Arg *Unsupported; if (types::isCXX(InputType) && @@ -3235,25 +3396,36 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, CmdArgs.push_back("-generate-type-units"); } + // CloudABI uses -ffunction-sections and -fdata-sections by default. + bool UseSeparateSections = Triple.getOS() == llvm::Triple::CloudABI; + if (Args.hasFlag(options::OPT_ffunction_sections, - options::OPT_fno_function_sections, false)) { + options::OPT_fno_function_sections, UseSeparateSections)) { CmdArgs.push_back("-ffunction-sections"); } if (Args.hasFlag(options::OPT_fdata_sections, - options::OPT_fno_data_sections, false)) { + options::OPT_fno_data_sections, UseSeparateSections)) { CmdArgs.push_back("-fdata-sections"); } + if (!Args.hasFlag(options::OPT_funique_section_names, + options::OPT_fno_unique_section_names, true)) + CmdArgs.push_back("-fno-unique-section-names"); + Args.AddAllArgs(CmdArgs, options::OPT_finstrument_functions); - if (Args.hasArg(options::OPT_fprofile_instr_generate) && + if ((Args.hasArg(options::OPT_fprofile_instr_generate) || + Args.hasArg(options::OPT_fprofile_instr_generate_EQ)) && (Args.hasArg(options::OPT_fprofile_instr_use) || Args.hasArg(options::OPT_fprofile_instr_use_EQ))) D.Diag(diag::err_drv_argument_not_allowed_with) << "-fprofile-instr-generate" << "-fprofile-instr-use"; - Args.AddAllArgs(CmdArgs, options::OPT_fprofile_instr_generate); + if (Arg *A = Args.getLastArg(options::OPT_fprofile_instr_generate_EQ)) + A->render(Args, CmdArgs); + else + Args.AddAllArgs(CmdArgs, options::OPT_fprofile_instr_generate); if (Arg *A = Args.getLastArg(options::OPT_fprofile_instr_use_EQ)) A->render(Args, CmdArgs); @@ -3269,7 +3441,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, CmdArgs.push_back("-femit-coverage-data"); if (Args.hasArg(options::OPT_fcoverage_mapping) && - !Args.hasArg(options::OPT_fprofile_instr_generate)) + !(Args.hasArg(options::OPT_fprofile_instr_generate) || + Args.hasArg(options::OPT_fprofile_instr_generate_EQ))) D.Diag(diag::err_drv_argument_only_allowed_with) << "-fcoverage-mapping" << "-fprofile-instr-generate"; @@ -3286,10 +3459,10 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, } else { CoverageFilename = llvm::sys::path::filename(Output.getBaseInput()); } - if (llvm::sys::path::is_relative(CoverageFilename.str())) { + if (llvm::sys::path::is_relative(CoverageFilename)) { SmallString<128> Pwd; if (!llvm::sys::fs::current_path(Pwd)) { - llvm::sys::path::append(Pwd, CoverageFilename.str()); + llvm::sys::path::append(Pwd, CoverageFilename); CoverageFilename.swap(Pwd); } } @@ -3372,6 +3545,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, Args.AddLastArg(CmdArgs, options::OPT_objcmt_migrate_all); Args.AddLastArg(CmdArgs, options::OPT_objcmt_migrate_readonly_property); Args.AddLastArg(CmdArgs, options::OPT_objcmt_migrate_readwrite_property); + Args.AddLastArg(CmdArgs, options::OPT_objcmt_migrate_property_dot_syntax); Args.AddLastArg(CmdArgs, options::OPT_objcmt_migrate_annotation); Args.AddLastArg(CmdArgs, options::OPT_objcmt_migrate_instancetype); Args.AddLastArg(CmdArgs, options::OPT_objcmt_migrate_nsmacros); @@ -3427,6 +3601,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, // // If a std is supplied, only add -trigraphs if it follows the // option. 
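The -ffunction-sections change above is an instance of the driver's standard hasFlag pattern: the last positive or negative flag on the command line wins, and the supplied default (true only on CloudABI here) applies when neither appears. A stand-alone sketch of those semantics:

#include <string>
#include <vector>

// Later flags override earlier ones; Default fires only if neither is given.
bool hasFlag(const std::vector<std::string> &Args, const std::string &Pos,
             const std::string &Neg, bool Default) {
  bool Value = Default;
  for (const auto &A : Args) {
    if (A == Pos)
      Value = true;
    else if (A == Neg)
      Value = false;
  }
  return Value;
}
// hasFlag(Cmd, "-ffunction-sections", "-fno-function-sections",
//         /*UseSeparateSections=*/TripleIsCloudABI)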
+ bool ImplyVCPPCXXVer = false; if (Arg *Std = Args.getLastArg(options::OPT_std_EQ, options::OPT_ansi)) { if (Std->getOption().matches(options::OPT_ansi)) if (types::isCXX(InputType)) @@ -3453,7 +3628,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, Args.AddAllArgsTranslated(CmdArgs, options::OPT_std_default_EQ, "-std=", /*Joined=*/true); else if (IsWindowsMSVC) - CmdArgs.push_back("-std=c++11"); + ImplyVCPPCXXVer = true; Args.AddLastArg(CmdArgs, options::OPT_ftrigraphs, options::OPT_fno_trigraphs); @@ -3628,6 +3803,12 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, Args.AddLastArg(CmdArgs, options::OPT_fdiagnostics_show_template_tree); Args.AddLastArg(CmdArgs, options::OPT_fno_elide_type); + // Forward flags for OpenMP + if (Args.hasArg(options::OPT_fopenmp_EQ) || + Args.hasArg(options::OPT_fopenmp)) { + CmdArgs.push_back("-fopenmp"); + } + const SanitizerArgs &Sanitize = getToolChain().getSanitizerArgs(); Sanitize.addArgs(Args, CmdArgs); @@ -3739,6 +3920,15 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, CmdArgs.push_back(Args.MakeArgString("-mstack-alignment=" + alignment)); } + if (Args.hasArg(options::OPT_mstack_probe_size)) { + StringRef Size = Args.getLastArgValue(options::OPT_mstack_probe_size); + + if (!Size.empty()) + CmdArgs.push_back(Args.MakeArgString("-mstack-probe-size=" + Size)); + else + CmdArgs.push_back("-mstack-probe-size=0"); + } + if (getToolChain().getTriple().getArch() == llvm::Triple::aarch64 || getToolChain().getTriple().getArch() == llvm::Triple::aarch64_be) CmdArgs.push_back("-fallow-half-arguments-and-returns"); @@ -3845,6 +4035,12 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, CmdArgs.push_back("-fmodules-strict-decluse"); } + // -fno-implicit-modules turns off implicitly compiling modules on demand. + if (!Args.hasFlag(options::OPT_fimplicit_modules, + options::OPT_fno_implicit_modules)) { + CmdArgs.push_back("-fno-implicit-modules"); + } + // -fmodule-name specifies the module that is currently being built (or // used for header checking by -fmodule-maps). Args.AddLastArg(CmdArgs, options::OPT_fmodule_name); @@ -3872,7 +4068,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, // No module path was provided: use the default. llvm::sys::path::system_temp_directory(/*erasedOnReboot=*/false, ModuleCachePath); - llvm::sys::path::append(ModuleCachePath, "org.llvm.clang"); + llvm::sys::path::append(ModuleCachePath, "org.llvm.clang."); + appendUserToPath(ModuleCachePath); llvm::sys::path::append(ModuleCachePath, "ModuleCache"); } const char Arg[] = "-fmodules-cache-path="; @@ -3911,10 +4108,9 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, llvm::sys::fs::file_status Status; if (llvm::sys::fs::status(A->getValue(), Status)) D.Diag(diag::err_drv_no_such_file) << A->getValue(); - char TimeStamp[48]; - snprintf(TimeStamp, sizeof(TimeStamp), "-fbuild-session-timestamp=%" PRIu64, - (uint64_t)Status.getLastModificationTime().toEpochTime()); - CmdArgs.push_back(Args.MakeArgString(TimeStamp)); + CmdArgs.push_back(Args.MakeArgString( + "-fbuild-session-timestamp=" + + Twine((uint64_t)Status.getLastModificationTime().toEpochTime()))); } if (Args.getLastArg(options::OPT_fmodules_validate_once_per_build_session)) { @@ -3940,21 +4136,12 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, false)) CmdArgs.push_back("-fno-elide-constructors"); - // -frtti is default. 
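The -fbuild-session-file logic above boils down to: stat the file, take its last-modified time, and forward it as -fbuild-session-timestamp. A sketch of the same idea on std::filesystem rather than llvm::sys::fs; note that before C++20 the epoch of file_time_type is implementation-defined, so this is illustrative rather than a portable Unix timestamp:

#include <chrono>
#include <filesystem>
#include <string>

std::string buildSessionTimestampArg(const std::filesystem::path &P) {
  // Throws std::filesystem::filesystem_error if P does not exist, where the
  // driver instead emits err_drv_no_such_file.
  auto FileTime = std::filesystem::last_write_time(P);
  auto Secs = std::chrono::duration_cast<std::chrono::seconds>(
                  FileTime.time_since_epoch())
                  .count();
  return "-fbuild-session-timestamp=" + std::to_string(Secs);
}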
- if (!Args.hasFlag(options::OPT_frtti, options::OPT_fno_rtti) || - KernelOrKext) { - CmdArgs.push_back("-fno-rtti"); + ToolChain::RTTIMode RTTIMode = getToolChain().getRTTIMode(); - // -fno-rtti cannot usefully be combined with -fsanitize=vptr. - if (Sanitize.sanitizesVptr()) { - std::string NoRttiArg = - Args.getLastArg(options::OPT_mkernel, - options::OPT_fapple_kext, - options::OPT_fno_rtti)->getAsString(Args); - D.Diag(diag::err_drv_argument_not_allowed_with) - << "-fsanitize=vptr" << NoRttiArg; - } - } + if (KernelOrKext || (types::isCXX(InputType) && + (RTTIMode == ToolChain::RM_DisabledExplicitly || + RTTIMode == ToolChain::RM_DisabledImplicitly))) + CmdArgs.push_back("-fno-rtti"); // -fshort-enums=0 is default for all architectures except Hexagon. if (Args.hasFlag(options::OPT_fshort_enums, @@ -3964,14 +4151,16 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, CmdArgs.push_back("-fshort-enums"); // -fsigned-char is default. - if (!Args.hasFlag(options::OPT_fsigned_char, options::OPT_funsigned_char, - isSignedCharDefault(getToolChain().getTriple()))) + if (Arg *A = Args.getLastArg( + options::OPT_fsigned_char, options::OPT_fno_signed_char, + options::OPT_funsigned_char, options::OPT_fno_unsigned_char)) { + if (A->getOption().matches(options::OPT_funsigned_char) || + A->getOption().matches(options::OPT_fno_signed_char)) { + CmdArgs.push_back("-fno-signed-char"); + } + } else if (!isSignedCharDefault(getToolChain().getTriple())) { CmdArgs.push_back("-fno-signed-char"); - - // -fthreadsafe-static is default. - if (!Args.hasFlag(options::OPT_fthreadsafe_statics, - options::OPT_fno_threadsafe_statics)) - CmdArgs.push_back("-fno-threadsafe-statics"); + } // -fuse-cxa-atexit is default. if (!Args.hasFlag(options::OPT_fuse_cxa_atexit, @@ -3987,6 +4176,11 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, IsWindowsMSVC)) CmdArgs.push_back("-fms-extensions"); + // -fno-use-line-directives is default. + if (Args.hasFlag(options::OPT_fuse_line_directives, + options::OPT_fno_use_line_directives, false)) + CmdArgs.push_back("-fuse-line-directives"); + // -fms-compatibility=0 is default. if (Args.hasFlag(options::OPT_fms_compatibility, options::OPT_fno_ms_compatibility, @@ -3995,9 +4189,11 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, true)))) CmdArgs.push_back("-fms-compatibility"); - // -fms-compatibility-version=17.00 is default. + // -fms-compatibility-version=18.00 is default. 
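The char-signedness block above replaces a two-flag hasFlag with an explicit four-flag scan, since -fno-unsigned-char and -fno-signed-char also participate. A sketch of the resolution (last flag wins, target default otherwise):

#include <string>
#include <vector>

bool charIsSigned(const std::vector<std::string> &Args, bool TargetDefault) {
  bool Signed = TargetDefault; // isSignedCharDefault(Triple) in the driver
  for (const auto &A : Args) {
    if (A == "-fsigned-char" || A == "-fno-unsigned-char")
      Signed = true;
    else if (A == "-funsigned-char" || A == "-fno-signed-char")
      Signed = false;
  }
  return Signed; // cc1 receives -fno-signed-char when this is false
}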
+ VersionTuple MSVT; if (Args.hasFlag(options::OPT_fms_extensions, options::OPT_fno_ms_extensions, - IsWindowsMSVC) || Args.hasArg(options::OPT_fmsc_version) || + IsWindowsMSVC) || + Args.hasArg(options::OPT_fmsc_version) || Args.hasArg(options::OPT_fms_compatibility_version)) { const Arg *MSCVersion = Args.getLastArg(options::OPT_fmsc_version); const Arg *MSCompatibilityVersion = @@ -4008,16 +4204,31 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, << MSCVersion->getAsString(Args) << MSCompatibilityVersion->getAsString(Args); - std::string Ver; - if (MSCompatibilityVersion) - Ver = Args.getLastArgValue(options::OPT_fms_compatibility_version); - else if (MSCVersion) - Ver = getMSCompatibilityVersion(MSCVersion->getValue()); + if (MSCompatibilityVersion) { + if (MSVT.tryParse(MSCompatibilityVersion->getValue())) + D.Diag(diag::err_drv_invalid_value) + << MSCompatibilityVersion->getAsString(Args) + << MSCompatibilityVersion->getValue(); + } else if (MSCVersion) { + unsigned Version = 0; + if (StringRef(MSCVersion->getValue()).getAsInteger(10, Version)) + D.Diag(diag::err_drv_invalid_value) << MSCVersion->getAsString(Args) + << MSCVersion->getValue(); + MSVT = getMSCompatibilityVersion(Version); + } else { + MSVT = VersionTuple(18); + } + + CmdArgs.push_back( + Args.MakeArgString("-fms-compatibility-version=" + MSVT.getAsString())); + } - if (Ver.empty()) - CmdArgs.push_back("-fms-compatibility-version=17.00"); + bool IsMSVC2015Compatible = MSVT.getMajor() >= 19; + if (ImplyVCPPCXXVer) { + if (IsMSVC2015Compatible) + CmdArgs.push_back("-std=c++14"); else - CmdArgs.push_back(Args.MakeArgString("-fms-compatibility-version=" + Ver)); + CmdArgs.push_back("-std=c++11"); } // -fno-borland-extensions is default. @@ -4025,6 +4236,13 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, options::OPT_fno_borland_extensions, false)) CmdArgs.push_back("-fborland-extensions"); + // -fthreadsafe-static is default, except for MSVC compatibility versions less + // than 19. + if (!Args.hasFlag(options::OPT_fthreadsafe_statics, + options::OPT_fno_threadsafe_statics, + !IsWindowsMSVC || IsMSVC2015Compatible)) + CmdArgs.push_back("-fno-threadsafe-statics"); + // -fno-delayed-template-parsing is default, except for Windows where MSVC STL // needs it. if (Args.hasFlag(options::OPT_fdelayed_template_parsing, @@ -4130,9 +4348,13 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, } } + if (Args.hasFlag(options::OPT_fapplication_extension, + options::OPT_fno_application_extension, false)) + CmdArgs.push_back("-fapplication-extension"); + // Handle GCC-style exception args. if (!C.getDriver().IsCLMode()) - addExceptionArgs(Args, InputType, getToolChain().getTriple(), KernelOrKext, + addExceptionArgs(Args, InputType, getToolChain(), KernelOrKext, objcRuntime, CmdArgs); if (getToolChain().UseSjLjExceptions()) @@ -4143,6 +4365,12 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, options::OPT_fno_assume_sane_operator_new)) CmdArgs.push_back("-fno-assume-sane-operator-new"); + // -fsized-deallocation is off by default, as it is an ABI-breaking change for + // most platforms. + if (Args.hasFlag(options::OPT_fsized_deallocation, + options::OPT_fno_sized_deallocation, false)) + CmdArgs.push_back("-fsized-deallocation"); + // -fconstant-cfstrings is default, and may be subject to argument translation // on Darwin. 
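Two defaults above key off the same predicate: an MSVC compatibility major version of 19 (Visual Studio 2015) or newer implies C++14 as the default -std and leaves thread-safe statics enabled, while older versions get C++11 and -fno-threadsafe-statics. Condensed into one place (struct and function are illustrative, not driver API):

#include <string>

struct MSVCDefaults {
  std::string Std;
  bool ThreadSafeStatics;
};

// defaultsForMSVC(18) -> {"-std=c++11", false}
// defaultsForMSVC(19) -> {"-std=c++14", true}
MSVCDefaults defaultsForMSVC(unsigned Major) {
  bool IsMSVC2015Compatible = Major >= 19;
  return {IsMSVC2015Compatible ? "-std=c++14" : "-std=c++11",
          IsMSVC2015Compatible};
}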
if (!Args.hasFlag(options::OPT_fconstant_cfstrings, @@ -4326,6 +4554,11 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, false)) CmdArgs.push_back("-fasm-blocks"); + // -fgnu-inline-asm is default. + if (!Args.hasFlag(options::OPT_fgnu_inline_asm, + options::OPT_fno_gnu_inline_asm, true)) + CmdArgs.push_back("-fno-gnu-inline-asm"); + // Enable vectorization per default according to the optimization level // selected. For optimization levels that want vectorization we use the alias // option to simplify the hasFlag logic. @@ -4450,7 +4683,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, // With -save-temps, we want to save the unoptimized bitcode output from the // CompileJobAction, so disable optimizations if they are not already // disabled. - if (Args.hasArg(options::OPT_save_temps) && !OptDisabled && + if (C.getDriver().isSaveTempsEnabled() && !OptDisabled && isa<CompileJobAction>(JA)) CmdArgs.push_back("-disable-llvm-optzns"); @@ -4492,7 +4725,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, Flags += EscapedArg; } CmdArgs.push_back("-dwarf-debug-flags"); - CmdArgs.push_back(Args.MakeArgString(Flags.str())); + CmdArgs.push_back(Args.MakeArgString(Flags)); } // Add the split debug info name to the command lines here so we @@ -4504,7 +4737,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, const char *SplitDwarfOut; if (SplitDwarf) { CmdArgs.push_back("-split-dwarf-file"); - SplitDwarfOut = SplitDebugName(Args, Inputs); + SplitDwarfOut = SplitDebugName(Args, Input); CmdArgs.push_back(SplitDwarfOut); } @@ -4772,8 +5005,8 @@ void Clang::AddClangCLArgs(const ArgList &Args, ArgStringList &CmdArgs) const { EHFlags EH = parseClangCLEHFlags(D, Args); // FIXME: Do something with NoExceptC. if (EH.Synch || EH.Asynch) { - CmdArgs.push_back("-fexceptions"); CmdArgs.push_back("-fcxx-exceptions"); + CmdArgs.push_back("-fexceptions"); } // /EP should expand to -E -P. @@ -4782,6 +5015,19 @@ void Clang::AddClangCLArgs(const ArgList &Args, ArgStringList &CmdArgs) const { CmdArgs.push_back("-P"); } + unsigned VolatileOptionID; + if (getToolChain().getTriple().getArch() == llvm::Triple::x86_64 || + getToolChain().getTriple().getArch() == llvm::Triple::x86) + VolatileOptionID = options::OPT__SLASH_volatile_ms; + else + VolatileOptionID = options::OPT__SLASH_volatile_iso; + + if (Arg *A = Args.getLastArg(options::OPT__SLASH_volatile_Group)) + VolatileOptionID = A->getOption().getID(); + + if (VolatileOptionID == options::OPT__SLASH_volatile_ms) + CmdArgs.push_back("-fms-volatile"); + Arg *MostGeneralArg = Args.getLastArg(options::OPT__SLASH_vmg); Arg *BestCaseArg = Args.getLastArg(options::OPT__SLASH_vmb); if (MostGeneralArg && BestCaseArg) @@ -4826,6 +5072,17 @@ visualstudio::Compile *Clang::getCLFallback() const { return CLFallback.get(); } +void ClangAs::AddMIPSTargetArgs(const ArgList &Args, + ArgStringList &CmdArgs) const { + StringRef CPUName; + StringRef ABIName; + const llvm::Triple &Triple = getToolChain().getTriple(); + mips::getMipsCPUAndABI(Args, Triple, CPUName, ABIName); + + CmdArgs.push_back("-target-abi"); + CmdArgs.push_back(ABIName.data()); +} + void ClangAs::ConstructJob(Compilation &C, const JobAction &JA, const InputInfo &Output, const InputInfoList &Inputs, @@ -4862,10 +5119,10 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA, // Set the main file name, so that debug info works even with // -save-temps or preprocessed assembly. 
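The /volatile handling above picks an architecture-dependent default (MS semantics on x86 and x86_64, ISO elsewhere) that an explicit /volatile:ms or /volatile:iso overrides; only the MS mode adds -fms-volatile to the cc1 line. A sketch of that decision:

#include <optional>

enum class VolatileMode { MS, ISO };

VolatileMode volatileMode(bool IsX86orX86_64,
                          std::optional<VolatileMode> Explicit) {
  VolatileMode Default = IsX86orX86_64 ? VolatileMode::MS : VolatileMode::ISO;
  return Explicit.value_or(Default); // MS -> push "-fms-volatile"
}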
CmdArgs.push_back("-main-file-name"); - CmdArgs.push_back(Clang::getBaseInputName(Args, Inputs)); + CmdArgs.push_back(Clang::getBaseInputName(Args, Input)); // Add the target cpu - const llvm::Triple &Triple = getToolChain().getTriple(); + const llvm::Triple Triple(TripleStr); std::string CPU = getCPUName(Args, Triple); if (!CPU.empty()) { CmdArgs.push_back("-target-cpu"); @@ -4928,11 +5185,24 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA, Flags += EscapedArg; } CmdArgs.push_back("-dwarf-debug-flags"); - CmdArgs.push_back(Args.MakeArgString(Flags.str())); + CmdArgs.push_back(Args.MakeArgString(Flags)); } // FIXME: Add -static support, once we have it. + // Add target specific flags. + switch(getToolChain().getArch()) { + default: + break; + + case llvm::Triple::mips: + case llvm::Triple::mipsel: + case llvm::Triple::mips64: + case llvm::Triple::mips64el: + AddMIPSTargetArgs(Args, CmdArgs); + break; + } + // Consume all the warning flags. Usually this would be handled more // gracefully by -cc1 (warning about unknown warning flags, etc) but -cc1as // doesn't handle that so rather than warning about unused flags that are @@ -4964,7 +5234,7 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA, if (Args.hasArg(options::OPT_gsplit_dwarf) && getToolChain().getTriple().isOSLinux()) SplitDebugInfo(getToolChain(), C, *this, JA, Args, Output, - SplitDebugName(Args, Inputs)); + SplitDebugName(Args, Input)); } void GnuTool::anchor() {} @@ -5095,16 +5365,22 @@ void gcc::Compile::RenderExtraToolArgs(const JobAction &JA, ArgStringList &CmdArgs) const { const Driver &D = getToolChain().getDriver(); + switch (JA.getType()) { // If -flto, etc. are present then make sure not to force assembly output. - if (JA.getType() == types::TY_LLVM_IR || JA.getType() == types::TY_LTO_IR || - JA.getType() == types::TY_LLVM_BC || JA.getType() == types::TY_LTO_BC) + case types::TY_LLVM_IR: + case types::TY_LTO_IR: + case types::TY_LLVM_BC: + case types::TY_LTO_BC: CmdArgs.push_back("-c"); - else { - if (JA.getType() != types::TY_PP_Asm) - D.Diag(diag::err_drv_invalid_gcc_output_type) - << getTypeName(JA.getType()); - + break; + case types::TY_PP_Asm: CmdArgs.push_back("-S"); + break; + case types::TY_Nothing: + CmdArgs.push_back("-fsyntax-only"); + break; + default: + D.Diag(diag::err_drv_invalid_gcc_output_type) << getTypeName(JA.getType()); } } @@ -5142,10 +5418,8 @@ void hexagon::Assemble::ConstructJob(Compilation &C, const JobAction &JA, CmdArgs.push_back("-fsyntax-only"); } - std::string SmallDataThreshold = GetHexagonSmallDataThresholdValue(Args); - if (!SmallDataThreshold.empty()) - CmdArgs.push_back( - Args.MakeArgString(std::string("-G") + SmallDataThreshold)); + if (const char* v = toolchains::Hexagon_TC::GetSmallDataThreshold(Args)) + CmdArgs.push_back(Args.MakeArgString(std::string("-G") + v)); Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA, options::OPT_Xassembler); @@ -5188,17 +5462,16 @@ void hexagon::Link::RenderExtraToolArgs(const JobAction &JA, // The types are (hopefully) good enough. 
} -void hexagon::Link::ConstructJob(Compilation &C, const JobAction &JA, - const InputInfo &Output, - const InputInfoList &Inputs, - const ArgList &Args, - const char *LinkingOutput) const { +static void constructHexagonLinkArgs(Compilation &C, const JobAction &JA, + const toolchains::Hexagon_TC& ToolChain, + const InputInfo &Output, + const InputInfoList &Inputs, + const ArgList &Args, + ArgStringList &CmdArgs, + const char *LinkingOutput) { - const toolchains::Hexagon_TC& ToolChain = - static_cast<const toolchains::Hexagon_TC&>(getToolChain()); const Driver &D = ToolChain.getDriver(); - ArgStringList CmdArgs; //---------------------------------------------------------------------------- // @@ -5209,6 +5482,7 @@ void hexagon::Link::ConstructJob(Compilation &C, const JobAction &JA, bool incStdLib = !Args.hasArg(options::OPT_nostdlib); bool incStartFiles = !Args.hasArg(options::OPT_nostartfiles); bool incDefLibs = !Args.hasArg(options::OPT_nodefaultlibs); + bool useG0 = false; bool useShared = buildingLib && !hasStaticArg; //---------------------------------------------------------------------------- @@ -5242,10 +5516,9 @@ void hexagon::Link::ConstructJob(Compilation &C, const JobAction &JA, if (buildPIE && !buildingLib) CmdArgs.push_back("-pie"); - std::string SmallDataThreshold = GetHexagonSmallDataThresholdValue(Args); - if (!SmallDataThreshold.empty()) { - CmdArgs.push_back( - Args.MakeArgString(std::string("-G") + SmallDataThreshold)); + if (const char* v = toolchains::Hexagon_TC::GetSmallDataThreshold(Args)) { + CmdArgs.push_back(Args.MakeArgString(std::string("-G") + v)); + useG0 = toolchains::Hexagon_TC::UsesG0(v); } //---------------------------------------------------------------------------- @@ -5261,8 +5534,7 @@ void hexagon::Link::ConstructJob(Compilation &C, const JobAction &JA, toolchains::Hexagon_TC::GetGnuDir(D.InstalledDir, Args) + "/"; const std::string StartFilesDir = RootDir + "hexagon/lib" - + (buildingLib - ? MarchG0Suffix : MarchSuffix); + + (useG0 ? MarchG0Suffix : MarchSuffix); //---------------------------------------------------------------------------- // moslib @@ -5344,6 +5616,20 @@ void hexagon::Link::ConstructJob(Compilation &C, const JobAction &JA, std::string finiObj = useShared ? "/finiS.o" : "/fini.o"; CmdArgs.push_back(Args.MakeArgString(StartFilesDir + finiObj)); } +} + +void hexagon::Link::ConstructJob(Compilation &C, const JobAction &JA, + const InputInfo &Output, + const InputInfoList &Inputs, + const ArgList &Args, + const char *LinkingOutput) const { + + const toolchains::Hexagon_TC& ToolChain = + static_cast<const toolchains::Hexagon_TC&>(getToolChain()); + + ArgStringList CmdArgs; + constructHexagonLinkArgs(C, JA, ToolChain, Output, Inputs, Args, CmdArgs, + LinkingOutput); std::string Linker = ToolChain.GetProgramPath("hexagon-ld"); C.addCommand(llvm::make_unique<Command>(JA, *this, Args.MakeArgString(Linker), @@ -5351,9 +5637,8 @@ void hexagon::Link::ConstructJob(Compilation &C, const JobAction &JA, } // Hexagon tools end. -/// Get the (LLVM) name of the minimum ARM CPU for the arch we are targeting. -const char *arm::getARMCPUForMArch(const ArgList &Args, - const llvm::Triple &Triple) { +const StringRef arm::getARMArch(const ArgList &Args, + const llvm::Triple &Triple) { StringRef MArch; if (Arg *A = Args.getLastArg(options::OPT_march_EQ)) { // Otherwise, if we have -march= choose the base CPU for that arch. 
@@ -5367,13 +5652,35 @@ const char *arm::getARMCPUForMArch(const ArgList &Args,
   if (MArch == "native") {
     std::string CPU = llvm::sys::getHostCPUName();
     if (CPU != "generic") {
-      // Translate the native cpu into the architecture. The switch below will
-      // then chose the minimum cpu for that arch.
-      MArch = std::string("arm") + arm::getLLVMArchSuffixForARM(CPU);
+      // Translate the native cpu into the architecture suffix for that CPU.
+      const char *Suffix = arm::getLLVMArchSuffixForARM(CPU, MArch);
+      // If there is no valid architecture suffix for this CPU we don't know how
+      // to handle it, so return no architecture.
+      if (strcmp(Suffix,"") == 0)
+        MArch = "";
+      else
+        MArch = std::string("arm") + Suffix;
     }
   }
 
-  return Triple.getARMCPUForArch(MArch);
+  return MArch;
+}
+/// Get the (LLVM) name of the minimum ARM CPU for the arch we are targeting.
+const char *arm::getARMCPUForMArch(const ArgList &Args,
+                                   const llvm::Triple &Triple) {
+  StringRef MArch = getARMArch(Args, Triple);
+  // getARMCPUForArch defaults to the triple if MArch is empty, but empty MArch
+  // here means an -march=native that we can't handle, so instead return no CPU.
+  if (MArch.empty())
+    return "";
+
+  // We need to return an empty string here on invalid MArch values as the
+  // various places that call this function can't cope with a null result.
+  const char *result = Triple.getARMCPUForArch(MArch);
+  if (result)
+    return result;
+  else
+    return "";
 }
 
 /// getARMTargetCPU - Get the (LLVM) name of the ARM cpu we are targeting.
@@ -5394,13 +5701,24 @@ StringRef arm::getARMTargetCPU(const ArgList &Args,
 }
 
 /// getLLVMArchSuffixForARM - Get the LLVM arch name to use for a particular
-/// CPU.
+/// CPU (or Arch, if CPU is generic).
 //
 // FIXME: This is redundant with -mcpu, why does LLVM use this.
 // FIXME: tblgen this, or kill it!
-const char *arm::getLLVMArchSuffixForARM(StringRef CPU) {
+// FIXME: Use ARMTargetParser.
+const char *arm::getLLVMArchSuffixForARM(StringRef CPU, StringRef Arch) {
+  // FIXME: Use ARMTargetParser
+  if (CPU == "generic") {
+    if (Arch == "armv8.1a" || Arch == "armv8.1-a" ||
+        Arch == "armebv8.1a" || Arch == "armebv8.1-a") {
+      return "v8.1a";
+    }
+  }
+
+  // FIXME: Use ARMTargetParser
   return llvm::StringSwitch<const char *>(CPU)
-    .Case("strongarm", "v4")
+    .Cases("arm8", "arm810", "v4")
+    .Cases("strongarm", "strongarm110", "strongarm1100", "strongarm1110", "v4")
     .Cases("arm7tdmi", "arm7tdmi-s", "arm710t", "v4t")
     .Cases("arm720t", "arm9", "arm9tdmi", "v4t")
     .Cases("arm920", "arm920t", "arm922t", "v4t")
@@ -5409,33 +5727,51 @@ const char *arm::getLLVMArchSuffixForARM(StringRef CPU) {
     .Cases("arm9e", "arm926ej-s", "arm946e-s", "v5e")
     .Cases("arm966e-s", "arm968e-s", "arm10e", "v5e")
     .Cases("arm1020e", "arm1022e", "xscale", "iwmmxt", "v5e")
-    .Cases("arm1136j-s", "arm1136jf-s", "arm1176jz-s", "v6")
-    .Cases("arm1176jzf-s", "mpcorenovfp", "mpcore", "v6")
+    .Cases("arm1136j-s", "arm1136jf-s", "v6")
+    .Cases("arm1176jz-s", "arm1176jzf-s", "v6k")
+    .Cases("mpcorenovfp", "mpcore", "v6k")
     .Cases("arm1156t2-s", "arm1156t2f-s", "v6t2")
     .Cases("cortex-a5", "cortex-a7", "cortex-a8", "v7")
     .Cases("cortex-a9", "cortex-a12", "cortex-a15", "cortex-a17", "krait", "v7")
-    .Cases("cortex-r4", "cortex-r5", "v7r")
-    .Case("cortex-m0", "v6m")
-    .Case("cortex-m3", "v7m")
+    .Cases("cortex-r4", "cortex-r4f", "cortex-r5", "cortex-r7", "v7r")
+    .Cases("sc000", "cortex-m0", "cortex-m0plus", "cortex-m1", "v6m")
+    .Cases("sc300", "cortex-m3", "v7m")
     .Cases("cortex-m4", "cortex-m7", "v7em")
     .Case("swift", "v7s")
     .Case("cyclone", "v8")
-    .Cases("cortex-a53", "cortex-a57", "v8")
+    .Cases("cortex-a53", "cortex-a57", "cortex-a72", "v8")
     .Default("");
 }
 
-void arm::appendEBLinkFlags(const ArgList &Args, ArgStringList &CmdArgs, const llvm::Triple &Triple) {
+void arm::appendEBLinkFlags(const ArgList &Args, ArgStringList &CmdArgs,
+                            const llvm::Triple &Triple) {
   if (Args.hasArg(options::OPT_r))
     return;
-  StringRef Suffix = getLLVMArchSuffixForARM(getARMCPUForMArch(Args, Triple));
-  const char *LinkFlag = llvm::StringSwitch<const char *>(Suffix)
-    .Cases("v4", "v4t", "v5", "v5e", nullptr)
-    .Cases("v6", "v6t2", nullptr)
-    .Default("--be8");
-
-  if (LinkFlag)
-    CmdArgs.push_back(LinkFlag);
+  // ARMv7 (and later) and ARMv6-M do not support BE-32, so instruct the linker
+  // to generate BE-8 executables.
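The table above maps CPU names to LLVM arch suffixes, with a new special case checked first: a "generic" CPU paired with an armv8.1a arch spelling yields "v8.1a". A trimmed stand-in for illustration (a real build uses the full StringSwitch table; the armeb spellings are omitted here):

#include <map>
#include <string>

std::string archSuffixForARM(const std::string &CPU, const std::string &Arch) {
  if (CPU == "generic" && (Arch == "armv8.1a" || Arch == "armv8.1-a"))
    return "v8.1a";
  static const std::map<std::string, std::string> Table = {
      {"cortex-a8", "v7"}, {"cortex-a53", "v8"}, {"cortex-a72", "v8"},
      {"swift", "v7s"},    {"cyclone", "v8"}};
  auto It = Table.find(CPU);
  return It == Table.end() ? "" : It->second; // "" means unknown CPU
}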
+ if (getARMSubArchVersionNumber(Triple) >= 7 || isARMMProfile(Triple)) + CmdArgs.push_back("--be8"); +} + +mips::NanEncoding mips::getSupportedNanEncoding(StringRef &CPU) { + return (NanEncoding)llvm::StringSwitch<int>(CPU) + .Case("mips1", NanLegacy) + .Case("mips2", NanLegacy) + .Case("mips3", NanLegacy) + .Case("mips4", NanLegacy) + .Case("mips5", NanLegacy) + .Case("mips32", NanLegacy) + .Case("mips32r2", NanLegacy) + .Case("mips32r3", NanLegacy | Nan2008) + .Case("mips32r5", NanLegacy | Nan2008) + .Case("mips32r6", Nan2008) + .Case("mips64", NanLegacy) + .Case("mips64r2", NanLegacy) + .Case("mips64r3", NanLegacy | Nan2008) + .Case("mips64r5", NanLegacy | Nan2008) + .Case("mips64r6", Nan2008) + .Default(NanLegacy); } bool mips::hasMipsAbiArg(const ArgList &Args, const char *Value) { @@ -5474,8 +5810,8 @@ bool mips::isFPXXDefault(const llvm::Triple &Triple, StringRef CPUName, return llvm::StringSwitch<bool>(CPUName) .Cases("mips2", "mips3", "mips4", "mips5", true) - .Cases("mips32", "mips32r2", true) - .Cases("mips64", "mips64r2", true) + .Cases("mips32", "mips32r2", "mips32r3", "mips32r5", true) + .Cases("mips64", "mips64r2", "mips64r3", "mips64r5", true) .Default(false); } @@ -5527,14 +5863,13 @@ void darwin::setTripleTypeForMachOArchName(llvm::Triple &T, StringRef Str) { } const char *Clang::getBaseInputName(const ArgList &Args, - const InputInfoList &Inputs) { - return Args.MakeArgString( - llvm::sys::path::filename(Inputs[0].getBaseInput())); + const InputInfo &Input) { + return Args.MakeArgString(llvm::sys::path::filename(Input.getBaseInput())); } const char *Clang::getBaseInputStem(const ArgList &Args, const InputInfoList &Inputs) { - const char *Str = getBaseInputName(Args, Inputs); + const char *Str = getBaseInputName(Args, Inputs[0]); if (const char *End = strrchr(Str, '.')) return Args.MakeArgString(std::string(Str, End)); @@ -5556,6 +5891,76 @@ const char *Clang::getDependencyFileName(const ArgList &Args, return Args.MakeArgString(Res + ".d"); } +void cloudabi::Link::ConstructJob(Compilation &C, const JobAction &JA, + const InputInfo &Output, + const InputInfoList &Inputs, + const ArgList &Args, + const char *LinkingOutput) const { + const ToolChain &ToolChain = getToolChain(); + const Driver &D = ToolChain.getDriver(); + ArgStringList CmdArgs; + + // Silence warning for "clang -g foo.o -o foo" + Args.ClaimAllArgs(options::OPT_g_Group); + // and "clang -emit-llvm foo.o -o foo" + Args.ClaimAllArgs(options::OPT_emit_llvm); + // and for "clang -w foo.o -o foo". Other warning options are already + // handled somewhere else. + Args.ClaimAllArgs(options::OPT_w); + + if (!D.SysRoot.empty()) + CmdArgs.push_back(Args.MakeArgString("--sysroot=" + D.SysRoot)); + + // CloudABI only supports static linkage. 
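getSupportedNanEncoding above returns a bitmask: revisions such as mips32r3 and mips32r5 accept both the legacy and the 2008 NaN encodings, so their entries are the bitwise OR of the two flags. A sketch of the flag usage; the concrete values 1 and 2 are assumptions for illustration, only the OR-ing pattern is the point:

// Bit flags: a CPU may support either NaN encoding, or both.
enum NanEncoding { NanLegacy = 1, Nan2008 = 2 };

bool supportsLegacyNan(int Encoding) { return Encoding & NanLegacy; }
bool supportsNan2008(int Encoding) { return Encoding & Nan2008; }
// "mips32r3" -> NanLegacy | Nan2008: both predicates hold.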
+ CmdArgs.push_back("-Bstatic"); + CmdArgs.push_back("--eh-frame-hdr"); + CmdArgs.push_back("--gc-sections"); + + if (Output.isFilename()) { + CmdArgs.push_back("-o"); + CmdArgs.push_back(Output.getFilename()); + } else { + assert(Output.isNothing() && "Invalid output."); + } + + if (!Args.hasArg(options::OPT_nostdlib) && + !Args.hasArg(options::OPT_nostartfiles)) { + CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crt0.o"))); + CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtbegin.o"))); + } + + Args.AddAllArgs(CmdArgs, options::OPT_L); + const ToolChain::path_list &Paths = ToolChain.getFilePaths(); + for (const auto &Path : Paths) + CmdArgs.push_back(Args.MakeArgString(StringRef("-L") + Path)); + Args.AddAllArgs(CmdArgs, options::OPT_T_Group); + Args.AddAllArgs(CmdArgs, options::OPT_e); + Args.AddAllArgs(CmdArgs, options::OPT_s); + Args.AddAllArgs(CmdArgs, options::OPT_t); + Args.AddAllArgs(CmdArgs, options::OPT_Z_Flag); + Args.AddAllArgs(CmdArgs, options::OPT_r); + + if (D.IsUsingLTO(ToolChain, Args)) + AddGoldPlugin(ToolChain, Args, CmdArgs); + + AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs); + + if (!Args.hasArg(options::OPT_nostdlib) && + !Args.hasArg(options::OPT_nodefaultlibs)) { + if (D.CCCIsCXX()) + ToolChain.AddCXXStdlibLibArgs(Args, CmdArgs); + CmdArgs.push_back("-lc"); + CmdArgs.push_back("-lcompiler_rt"); + } + + if (!Args.hasArg(options::OPT_nostdlib) && + !Args.hasArg(options::OPT_nostartfiles)) + CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtend.o"))); + + const char *Exec = Args.MakeArgString(ToolChain.GetLinkerPath()); + C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs)); +} + void darwin::Assemble::ConstructJob(Compilation &C, const JobAction &JA, const InputInfo &Output, const InputInfoList &Inputs, @@ -5677,10 +6082,17 @@ void darwin::Link::AddLinkArgs(Compilation &C, if (Args.hasArg(options::OPT_rdynamic) && Version[0] >= 137) CmdArgs.push_back("-export_dynamic"); + // If we are using App Extension restrictions, pass a flag to the linker + // telling it that the compiled code has been audited. + if (Args.hasFlag(options::OPT_fapplication_extension, + options::OPT_fno_application_extension, false)) + CmdArgs.push_back("-application_extension"); + // If we are using LTO, then automatically create a temporary file path for // the linker to use, so that it's lifetime will extend past a possible // dsymutil step. - if (Version[0] >= 116 && D.IsUsingLTO(Args) && NeedsTempPath(Inputs)) { + if (Version[0] >= 116 && D.IsUsingLTO(getToolChain(), Args) && + NeedsTempPath(Inputs)) { const char *TmpPath = C.getArgs().MakeArgString( D.GetTemporaryPath("cc", types::getTypeTempSuffix(types::TY_Object))); C.addTempFile(TmpPath); @@ -5832,6 +6244,27 @@ enum LibOpenMP { LibIOMP5 }; +/// Map a -fopenmp=<blah> macro to the corresponding library. +static LibOpenMP getOpenMPLibByName(StringRef Name) { + return llvm::StringSwitch<LibOpenMP>(Name).Case("libgomp", LibGOMP) + .Case("libiomp5", LibIOMP5) + .Default(LibUnknown); +} + +/// Get the default -l<blah> flag to use for -fopenmp, if no library is +/// specified. This can be overridden at configure time. 
+static const char *getDefaultOpenMPLibFlag() { +#ifndef OPENMP_DEFAULT_LIB +#define OPENMP_DEFAULT_LIB iomp5 +#endif + +#define STR2(lib) #lib +#define STR(lib) STR2(lib) + return "-l" STR(OPENMP_DEFAULT_LIB); +#undef STR +#undef STR2 +} + void darwin::Link::ConstructJob(Compilation &C, const JobAction &JA, const InputInfo &Output, const InputInfoList &Inputs, @@ -5889,27 +6322,21 @@ void darwin::Link::ConstructJob(Compilation &C, const JobAction &JA, Args.AddAllArgs(CmdArgs, options::OPT_L); - LibOpenMP UsedOpenMPLib = LibUnknown; - if (Args.hasArg(options::OPT_fopenmp)) { - UsedOpenMPLib = LibGOMP; - } else if (const Arg *A = Args.getLastArg(options::OPT_fopenmp_EQ)) { - UsedOpenMPLib = llvm::StringSwitch<LibOpenMP>(A->getValue()) - .Case("libgomp", LibGOMP) - .Case("libiomp5", LibIOMP5) - .Default(LibUnknown); - if (UsedOpenMPLib == LibUnknown) + if (const Arg *A = Args.getLastArg(options::OPT_fopenmp_EQ)) { + switch (getOpenMPLibByName(A->getValue())) { + case LibGOMP: + CmdArgs.push_back("-lgomp"); + break; + case LibIOMP5: + CmdArgs.push_back("-liomp5"); + break; + case LibUnknown: getToolChain().getDriver().Diag(diag::err_drv_unsupported_option_argument) << A->getOption().getName() << A->getValue(); - } - switch (UsedOpenMPLib) { - case LibGOMP: - CmdArgs.push_back("-lgomp"); - break; - case LibIOMP5: - CmdArgs.push_back("-liomp5"); - break; - case LibUnknown: - break; + break; + } + } else if (Args.hasArg(options::OPT_fopenmp)) { + CmdArgs.push_back(getDefaultOpenMPLibFlag()); } AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs); @@ -5951,6 +6378,11 @@ void darwin::Link::ConstructJob(Compilation &C, const JobAction &JA, if (Args.hasArg(options::OPT_fnested_functions)) CmdArgs.push_back("-allow_stack_execute"); + // TODO: It would be nice to use addProfileRT() here, but darwin's compiler-rt + // paths are different enough from other toolchains that this needs a fair + // amount of refactoring done first. + getMachOToolChain().addProfileRTLibs(Args, CmdArgs); + if (!Args.hasArg(options::OPT_nostdlib) && !Args.hasArg(options::OPT_nodefaultlibs)) { if (getToolChain().getDriver().CCCIsCXX()) @@ -5970,6 +6402,22 @@ void darwin::Link::ConstructJob(Compilation &C, const JobAction &JA, Args.AddAllArgs(CmdArgs, options::OPT_T_Group); Args.AddAllArgs(CmdArgs, options::OPT_F); + // -iframework should be forwarded as -F. 
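The STR/STR2 pair above is the classic two-level stringification idiom: the preprocessor's # operator does not macro-expand its argument, so a single level would produce "OPENMP_DEFAULT_LIB" literally, and the extra indirection forces expansion first. A compilable demonstration:

#include <cstdio>

#define OPENMP_DEFAULT_LIB iomp5
#define STR2(lib) #lib
#define STR(lib) STR2(lib)

int main() {
  // STR2(OPENMP_DEFAULT_LIB) would yield "OPENMP_DEFAULT_LIB";
  // STR expands the macro first, yielding "iomp5".
  std::printf("%s\n", "-l" STR(OPENMP_DEFAULT_LIB)); // prints -liomp5
}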
+ for (auto it = Args.filtered_begin(options::OPT_iframework), + ie = Args.filtered_end(); it != ie; ++it) + CmdArgs.push_back(Args.MakeArgString(std::string("-F") + + (*it)->getValue())); + + if (!Args.hasArg(options::OPT_nostdlib) && + !Args.hasArg(options::OPT_nodefaultlibs)) { + if (Arg *A = Args.getLastArg(options::OPT_fveclib)) { + if (A->getValue() == StringRef("Accelerate")) { + CmdArgs.push_back("-framework"); + CmdArgs.push_back("Accelerate"); + } + } + } + const char *Exec = Args.MakeArgString(getToolChain().GetLinkerPath()); std::unique_ptr<Command> Cmd = @@ -6191,6 +6639,7 @@ void openbsd::Assemble::ConstructJob(Compilation &C, const JobAction &JA, break; case llvm::Triple::sparc: + case llvm::Triple::sparcel: CmdArgs.push_back("-32"); NeedsKPIC = true; break; @@ -6569,6 +7018,7 @@ void freebsd::Assemble::ConstructJob(Compilation &C, const JobAction &JA, CmdArgs.push_back("-matpcs"); } } else if (getToolChain().getArch() == llvm::Triple::sparc || + getToolChain().getArch() == llvm::Triple::sparcel || getToolChain().getArch() == llvm::Triple::sparcv9) { if (getToolChain().getArch() == llvm::Triple::sparc) CmdArgs.push_back("-Av8plusa"); @@ -6708,7 +7158,7 @@ void freebsd::Link::ConstructJob(Compilation &C, const JobAction &JA, Args.AddAllArgs(CmdArgs, options::OPT_Z_Flag); Args.AddAllArgs(CmdArgs, options::OPT_r); - if (D.IsUsingLTO(Args)) + if (D.IsUsingLTO(getToolChain(), Args)) AddGoldPlugin(ToolChain, Args, CmdArgs); bool NeedsSanitizerDeps = addSanitizerRuntimes(ToolChain, Args, CmdArgs); @@ -6834,6 +7284,7 @@ void netbsd::Assemble::ConstructJob(Compilation &C, const JobAction &JA, } case llvm::Triple::sparc: + case llvm::Triple::sparcel: CmdArgs.push_back("-32"); addAssemblerKPIC(Args, CmdArgs); break; @@ -6912,7 +7363,8 @@ void netbsd::Link::ConstructJob(Compilation &C, const JobAction &JA, break; case llvm::Triple::armeb: case llvm::Triple::thumbeb: - arm::appendEBLinkFlags(Args, CmdArgs, getToolChain().getTriple()); + arm::appendEBLinkFlags(Args, CmdArgs, + llvm::Triple(getToolChain().ComputeEffectiveClangTriple(Args))); CmdArgs.push_back("-m"); switch (getToolChain().getTriple().getEnvironment()) { case llvm::Triple::EABI: @@ -7080,47 +7532,66 @@ void gnutools::Assemble::ConstructJob(Compilation &C, const JobAction &JA, ArgStringList CmdArgs; bool NeedsKPIC = false; + switch (getToolChain().getArch()) { + default: + break; // Add --32/--64 to make sure we get the format we want. 
// This is incomplete - if (getToolChain().getArch() == llvm::Triple::x86) { + case llvm::Triple::x86: CmdArgs.push_back("--32"); - } else if (getToolChain().getArch() == llvm::Triple::x86_64) { + break; + case llvm::Triple::x86_64: if (getToolChain().getTriple().getEnvironment() == llvm::Triple::GNUX32) CmdArgs.push_back("--x32"); else CmdArgs.push_back("--64"); - } else if (getToolChain().getArch() == llvm::Triple::ppc) { + break; + case llvm::Triple::ppc: CmdArgs.push_back("-a32"); CmdArgs.push_back("-mppc"); CmdArgs.push_back("-many"); - } else if (getToolChain().getArch() == llvm::Triple::ppc64) { + break; + case llvm::Triple::ppc64: CmdArgs.push_back("-a64"); CmdArgs.push_back("-mppc64"); CmdArgs.push_back("-many"); - } else if (getToolChain().getArch() == llvm::Triple::ppc64le) { + break; + case llvm::Triple::ppc64le: CmdArgs.push_back("-a64"); CmdArgs.push_back("-mppc64"); CmdArgs.push_back("-many"); CmdArgs.push_back("-mlittle-endian"); - } else if (getToolChain().getArch() == llvm::Triple::sparc) { + break; + case llvm::Triple::sparc: + case llvm::Triple::sparcel: CmdArgs.push_back("-32"); CmdArgs.push_back("-Av8plusa"); NeedsKPIC = true; - } else if (getToolChain().getArch() == llvm::Triple::sparcv9) { + break; + case llvm::Triple::sparcv9: CmdArgs.push_back("-64"); CmdArgs.push_back("-Av9a"); NeedsKPIC = true; - } else if (getToolChain().getArch() == llvm::Triple::arm || - getToolChain().getArch() == llvm::Triple::armeb) { - StringRef MArch = getToolChain().getArchName(); - if (MArch == "armv7" || MArch == "armv7a" || MArch == "armv7-a") + break; + case llvm::Triple::arm: + case llvm::Triple::armeb: + case llvm::Triple::thumb: + case llvm::Triple::thumbeb: { + const llvm::Triple &Triple = getToolChain().getTriple(); + switch (Triple.getSubArch()) { + case llvm::Triple::ARMSubArch_v7: CmdArgs.push_back("-mfpu=neon"); - if (MArch == "armv8" || MArch == "armv8a" || MArch == "armv8-a" || - MArch == "armebv8" || MArch == "armebv8a" || MArch == "armebv8-a") + break; + case llvm::Triple::ARMSubArch_v8: CmdArgs.push_back("-mfpu=crypto-neon-fp-armv8"); + break; + default: + break; + } StringRef ARMFloatABI = tools::arm::getARMFloatABI( - getToolChain().getDriver(), Args, getToolChain().getTriple()); + getToolChain().getDriver(), Args, + llvm::Triple(getToolChain().ComputeEffectiveClangTriple(Args))); CmdArgs.push_back(Args.MakeArgString("-mfloat-abi=" + ARMFloatABI)); Args.AddLastArg(CmdArgs, options::OPT_march_EQ); @@ -7135,10 +7606,12 @@ void gnutools::Assemble::ConstructJob(Compilation &C, const JobAction &JA, else Args.AddLastArg(CmdArgs, options::OPT_mcpu_EQ); Args.AddLastArg(CmdArgs, options::OPT_mfpu_EQ); - } else if (getToolChain().getArch() == llvm::Triple::mips || - getToolChain().getArch() == llvm::Triple::mipsel || - getToolChain().getArch() == llvm::Triple::mips64 || - getToolChain().getArch() == llvm::Triple::mips64el) { + break; + } + case llvm::Triple::mips: + case llvm::Triple::mipsel: + case llvm::Triple::mips64: + case llvm::Triple::mips64el: { StringRef CPUName; StringRef ABIName; mips::getMipsCPUAndABI(Args, getToolChain().getTriple(), CPUName, ABIName); @@ -7222,11 +7695,15 @@ void gnutools::Assemble::ConstructJob(Compilation &C, const JobAction &JA, options::OPT_mno_odd_spreg); NeedsKPIC = true; - } else if (getToolChain().getArch() == llvm::Triple::systemz) { + break; + } + case llvm::Triple::systemz: { // Always pass an -march option, since our default of z10 is later // than the GNU assembler's default. 
StringRef CPUName = getSystemZTargetCPU(Args); CmdArgs.push_back(Args.MakeArgString("-march=" + CPUName)); + break; + } } if (NeedsKPIC) @@ -7250,7 +7727,7 @@ void gnutools::Assemble::ConstructJob(Compilation &C, const JobAction &JA, if (Args.hasArg(options::OPT_gsplit_dwarf) && getToolChain().getTriple().isOSLinux()) SplitDebugInfo(getToolChain(), C, *this, JA, Args, Output, - SplitDebugName(Args, Inputs)); + SplitDebugName(Args, Inputs[0])); } static void AddLibgcc(const llvm::Triple &Triple, const Driver &D, @@ -7294,7 +7771,8 @@ static std::string getLinuxDynamicLinker(const ArgList &Args, else return "/system/bin/linker"; } else if (ToolChain.getArch() == llvm::Triple::x86 || - ToolChain.getArch() == llvm::Triple::sparc) + ToolChain.getArch() == llvm::Triple::sparc || + ToolChain.getArch() == llvm::Triple::sparcel) return "/lib/ld-linux.so.2"; else if (ToolChain.getArch() == llvm::Triple::aarch64) return "/lib/ld-linux-aarch64.so.1"; @@ -7396,6 +7874,7 @@ static const char *getLDMOption(const llvm::Triple &T, const ArgList &Args) { case llvm::Triple::ppc64le: return "elf64lppc"; case llvm::Triple::sparc: + case llvm::Triple::sparcel: return "elf32_sparc"; case llvm::Triple::sparcv9: return "elf64_sparc"; @@ -7435,11 +7914,7 @@ void gnutools::Link::ConstructJob(Compilation &C, const JobAction &JA, const bool IsPIE = !Args.hasArg(options::OPT_shared) && !Args.hasArg(options::OPT_static) && - (Args.hasArg(options::OPT_pie) || ToolChain.isPIEDefault() || - // On Android every code is PIC so every executable is PIE - // Cannot use isPIEDefault here since otherwise - // PIE only logic will be enabled during compilation - isAndroid); + (Args.hasArg(options::OPT_pie) || ToolChain.isPIEDefault()); ArgStringList CmdArgs; @@ -7465,7 +7940,8 @@ void gnutools::Link::ConstructJob(Compilation &C, const JobAction &JA, if (ToolChain.getArch() == llvm::Triple::armeb || ToolChain.getArch() == llvm::Triple::thumbeb) - arm::appendEBLinkFlags(Args, CmdArgs, getToolChain().getTriple()); + arm::appendEBLinkFlags(Args, CmdArgs, + llvm::Triple(getToolChain().ComputeEffectiveClangTriple(Args))); for (const auto &Opt : ToolChain.ExtraOpts) CmdArgs.push_back(Opt.c_str()); @@ -7544,7 +8020,7 @@ void gnutools::Link::ConstructJob(Compilation &C, const JobAction &JA, for (const auto &Path : Paths) CmdArgs.push_back(Args.MakeArgString(StringRef("-L") + Path)); - if (D.IsUsingLTO(Args)) + if (D.IsUsingLTO(getToolChain(), Args)) AddGoldPlugin(ToolChain, Args, CmdArgs); if (Args.hasArg(options::OPT_Z_Xlinker__no_demangle)) @@ -7567,6 +8043,8 @@ void gnutools::Link::ConstructJob(Compilation &C, const JobAction &JA, CmdArgs.push_back("-Bdynamic"); CmdArgs.push_back("-lm"); } + // Silence warnings when linking C code with a C++ '-stdlib' argument. 
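getLDMOption above (extended here with sparcel) chooses the ELF emulation name passed to the linker's -m flag; note that big- and little-endian 32-bit SPARC share elf32_sparc. A reduced lookup for illustration, with a plain std::map standing in for the switch:

#include <map>
#include <string>

std::string ldEmulation(const std::string &Arch) {
  static const std::map<std::string, std::string> Table = {
      {"sparc", "elf32_sparc"},   {"sparcel", "elf32_sparc"},
      {"sparcv9", "elf64_sparc"}, {"ppc64le", "elf64lppc"}};
  auto It = Table.find(Arch);
  return It == Table.end() ? "" : It->second;
}
// The link line then gets: "-m", ldEmulation(ArchName)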
+ Args.ClaimAllArgs(options::OPT_stdlib_EQ); if (!Args.hasArg(options::OPT_nostdlib)) { if (!Args.hasArg(options::OPT_nodefaultlibs)) { @@ -7576,37 +8054,33 @@ void gnutools::Link::ConstructJob(Compilation &C, const JobAction &JA, if (NeedsSanitizerDeps) linkSanitizerRuntimeDeps(ToolChain, CmdArgs); - LibOpenMP UsedOpenMPLib = LibUnknown; - if (Args.hasArg(options::OPT_fopenmp)) { - UsedOpenMPLib = LibGOMP; - } else if (const Arg *A = Args.getLastArg(options::OPT_fopenmp_EQ)) { - UsedOpenMPLib = llvm::StringSwitch<LibOpenMP>(A->getValue()) - .Case("libgomp", LibGOMP) - .Case("libiomp5", LibIOMP5) - .Default(LibUnknown); - if (UsedOpenMPLib == LibUnknown) + bool WantPthread = true; + if (const Arg *A = Args.getLastArg(options::OPT_fopenmp_EQ)) { + switch (getOpenMPLibByName(A->getValue())) { + case LibGOMP: + CmdArgs.push_back("-lgomp"); + + // FIXME: Exclude this for platforms with libgomp that don't require + // librt. Most modern Linux platforms require it, but some may not. + CmdArgs.push_back("-lrt"); + break; + case LibIOMP5: + CmdArgs.push_back("-liomp5"); + break; + case LibUnknown: D.Diag(diag::err_drv_unsupported_option_argument) - << A->getOption().getName() << A->getValue(); - } - switch (UsedOpenMPLib) { - case LibGOMP: - CmdArgs.push_back("-lgomp"); - - // FIXME: Exclude this for platforms with libgomp that don't require - // librt. Most modern Linux platforms require it, but some may not. - CmdArgs.push_back("-lrt"); - break; - case LibIOMP5: - CmdArgs.push_back("-liomp5"); - break; - case LibUnknown: - break; + << A->getOption().getName() << A->getValue(); + break; + } + } else if (Args.hasArg(options::OPT_fopenmp)) { + CmdArgs.push_back(getDefaultOpenMPLibFlag()); + } else { + WantPthread = Args.hasArg(options::OPT_pthread) || + Args.hasArg(options::OPT_pthreads); } AddRunTimeLibs(ToolChain, D, CmdArgs, Args); - if ((Args.hasArg(options::OPT_pthread) || - Args.hasArg(options::OPT_pthreads) || UsedOpenMPLib != LibUnknown) && - !isAndroid) + if (WantPthread && !isAndroid) CmdArgs.push_back("-lpthread"); CmdArgs.push_back("-lc"); @@ -7636,6 +8110,172 @@ void gnutools::Link::ConstructJob(Compilation &C, const JobAction &JA, llvm::make_unique<Command>(JA, *this, ToolChain.Linker.c_str(), CmdArgs)); } + +// NaCl ARM assembly (inline or standalone) can be written with a set of macros +// for the various SFI requirements like register masking. The assembly tool +// inserts the file containing the macros as an input into all the assembly +// jobs. +void nacltools::AssembleARM::ConstructJob(Compilation &C, const JobAction &JA, + const InputInfo &Output, + const InputInfoList &Inputs, + const ArgList &Args, + const char *LinkingOutput) const { + const toolchains::NaCl_TC& ToolChain = + static_cast<const toolchains::NaCl_TC&>(getToolChain()); + InputInfo NaClMacros(ToolChain.GetNaClArmMacrosPath(), types::TY_PP_Asm, + "nacl-arm-macros.s"); + InputInfoList NewInputs; + NewInputs.push_back(NaClMacros); + NewInputs.append(Inputs.begin(), Inputs.end()); + gnutools::Assemble::ConstructJob(C, JA, Output, NewInputs, Args, + LinkingOutput); +} + + +// This is quite similar to gnutools::Link::ConstructJob, except that we use +// static linking by default, do not yet support sanitizers or LTO, and differ +// in a few other ways. Eventually we can support more of that and hopefully +// migrate back to gnutools::Link.
+void nacltools::Link::ConstructJob(Compilation &C, const JobAction &JA, + const InputInfo &Output, + const InputInfoList &Inputs, + const ArgList &Args, + const char *LinkingOutput) const { + + const toolchains::NaCl_TC& ToolChain = + static_cast<const toolchains::NaCl_TC&>(getToolChain()); + const Driver &D = ToolChain.getDriver(); + const bool IsStatic = + !Args.hasArg(options::OPT_dynamic) && + !Args.hasArg(options::OPT_shared); + + ArgStringList CmdArgs; + + // Silence warning for "clang -g foo.o -o foo" + Args.ClaimAllArgs(options::OPT_g_Group); + // and "clang -emit-llvm foo.o -o foo" + Args.ClaimAllArgs(options::OPT_emit_llvm); + // and for "clang -w foo.o -o foo". Other warning options are already + // handled somewhere else. + Args.ClaimAllArgs(options::OPT_w); + + if (!D.SysRoot.empty()) + CmdArgs.push_back(Args.MakeArgString("--sysroot=" + D.SysRoot)); + + if (Args.hasArg(options::OPT_rdynamic)) + CmdArgs.push_back("-export-dynamic"); + + if (Args.hasArg(options::OPT_s)) + CmdArgs.push_back("-s"); + + // NaCl_TC doesn't have ExtraOpts like Linux; the only relevant flag from + // there is --build-id, which we do want. + CmdArgs.push_back("--build-id"); + + if (!IsStatic) + CmdArgs.push_back("--eh-frame-hdr"); + + CmdArgs.push_back("-m"); + if (ToolChain.getArch() == llvm::Triple::x86) + CmdArgs.push_back("elf_i386_nacl"); + else if (ToolChain.getArch() == llvm::Triple::arm) + CmdArgs.push_back("armelf_nacl"); + else if (ToolChain.getArch() == llvm::Triple::x86_64) + CmdArgs.push_back("elf_x86_64_nacl"); + else + D.Diag(diag::err_target_unsupported_arch) << ToolChain.getArchName() << + "Native Client"; + + + if (IsStatic) + CmdArgs.push_back("-static"); + else if (Args.hasArg(options::OPT_shared)) + CmdArgs.push_back("-shared"); + + CmdArgs.push_back("-o"); + CmdArgs.push_back(Output.getFilename()); + if (!Args.hasArg(options::OPT_nostdlib) && + !Args.hasArg(options::OPT_nostartfiles)) { + if (!Args.hasArg(options::OPT_shared)) + CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crt1.o"))); + CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crti.o"))); + + const char *crtbegin; + if (IsStatic) + crtbegin = "crtbeginT.o"; + else if (Args.hasArg(options::OPT_shared)) + crtbegin = "crtbeginS.o"; + else + crtbegin = "crtbegin.o"; + CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crtbegin))); + } + + Args.AddAllArgs(CmdArgs, options::OPT_L); + Args.AddAllArgs(CmdArgs, options::OPT_u); + + const ToolChain::path_list &Paths = ToolChain.getFilePaths(); + + for (const auto &Path : Paths) + CmdArgs.push_back(Args.MakeArgString(StringRef("-L") + Path)); + + if (Args.hasArg(options::OPT_Z_Xlinker__no_demangle)) + CmdArgs.push_back("--no-demangle"); + + AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs); + + if (D.CCCIsCXX() && + !Args.hasArg(options::OPT_nostdlib) && + !Args.hasArg(options::OPT_nodefaultlibs)) { + bool OnlyLibstdcxxStatic = Args.hasArg(options::OPT_static_libstdcxx) && + !IsStatic; + if (OnlyLibstdcxxStatic) + CmdArgs.push_back("-Bstatic"); + ToolChain.AddCXXStdlibLibArgs(Args, CmdArgs); + if (OnlyLibstdcxxStatic) + CmdArgs.push_back("-Bdynamic"); + CmdArgs.push_back("-lm"); + } + + if (!Args.hasArg(options::OPT_nostdlib)) { + if (!Args.hasArg(options::OPT_nodefaultlibs)) { + // Always use groups, since it has no effect on dynamic libraries. + CmdArgs.push_back("--start-group"); + CmdArgs.push_back("-lc"); + // NaCl's libc++ currently requires libpthread, so just always include it + // in the group for C++. 
+ if (Args.hasArg(options::OPT_pthread) || + Args.hasArg(options::OPT_pthreads) || + D.CCCIsCXX()) { + CmdArgs.push_back("-lpthread"); + } + + CmdArgs.push_back("-lgcc"); + CmdArgs.push_back("--as-needed"); + if (IsStatic) + CmdArgs.push_back("-lgcc_eh"); + else + CmdArgs.push_back("-lgcc_s"); + CmdArgs.push_back("--no-as-needed"); + CmdArgs.push_back("--end-group"); + } + + if (!Args.hasArg(options::OPT_nostartfiles)) { + const char *crtend; + if (Args.hasArg(options::OPT_shared)) + crtend = "crtendS.o"; + else + crtend = "crtend.o"; + + CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crtend))); + CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtn.o"))); + } + } + + C.addCommand(llvm::make_unique<Command>(JA, *this, + ToolChain.Linker.c_str(), CmdArgs)); +} + + void minix::Assemble::ConstructJob(Compilation &C, const JobAction &JA, const InputInfo &Output, const InputInfoList &Inputs, @@ -7926,8 +8566,8 @@ void visualstudio::Link::ConstructJob(Compilation &C, const JobAction &JA, if (!llvm::sys::Process::GetEnv("LIB")) { // If the VC environment hasn't been configured (perhaps because the user // did not run vcvarsall), try to build a consistent link environment. If - // the environment variable is set however, assume the user knows what he's - // doing. + // the environment variable is set however, assume the user knows what + // they're doing. std::string VisualStudioDir; const auto &MSVC = static_cast<const toolchains::MSVCToolChain &>(TC); if (MSVC.getVisualStudioInstallDir(VisualStudioDir)) { @@ -7961,14 +8601,16 @@ void visualstudio::Link::ConstructJob(Compilation &C, const JobAction &JA, if (Args.hasArg(options::OPT_g_Group)) CmdArgs.push_back("-debug"); - bool DLL = Args.hasArg(options::OPT__SLASH_LD, options::OPT__SLASH_LDd); + bool DLL = Args.hasArg(options::OPT__SLASH_LD, + options::OPT__SLASH_LDd, + options::OPT_shared); if (DLL) { CmdArgs.push_back(Args.MakeArgString("-dll")); SmallString<128> ImplibName(Output.getFilename()); llvm::sys::path::replace_extension(ImplibName, "lib"); CmdArgs.push_back(Args.MakeArgString(std::string("-implib:") + - ImplibName.str())); + ImplibName)); } if (TC.getSanitizerArgs().needsAsanRt()) { @@ -8085,7 +8727,7 @@ std::unique_ptr<Command> visualstudio::Compile::GetCommand( } } - // Flags for which clang-cl have an alias. + // Flags for which clang-cl has an alias. // FIXME: How can we ensure this stays in sync with relevant clang-cl options? 
if (Args.hasFlag(options::OPT__SLASH_GR_, options::OPT__SLASH_GR, @@ -8105,7 +8747,8 @@ std::unique_ptr<Command> visualstudio::Compile::GetCommand( if (Args.hasArg(options::OPT_g_Flag, options::OPT_gline_tables_only)) CmdArgs.push_back("/Z7"); - std::vector<std::string> Includes = Args.getAllArgValues(options::OPT_include); + std::vector<std::string> Includes = + Args.getAllArgValues(options::OPT_include); for (const auto &Include : Includes) CmdArgs.push_back(Args.MakeArgString(std::string("/FI") + Include)); diff --git a/contrib/llvm/tools/clang/lib/Driver/Tools.h b/contrib/llvm/tools/clang/lib/Driver/Tools.h index 5aea825..25fe063 100644 --- a/contrib/llvm/tools/clang/lib/Driver/Tools.h +++ b/contrib/llvm/tools/clang/lib/Driver/Tools.h @@ -40,7 +40,7 @@ using llvm::opt::ArgStringList; class LLVM_LIBRARY_VISIBILITY Clang : public Tool { public: static const char *getBaseInputName(const llvm::opt::ArgList &Args, - const InputInfoList &Inputs); + const InputInfo &Input); static const char *getBaseInputStem(const llvm::opt::ArgList &Args, const InputInfoList &Inputs); static const char *getDependencyFileName(const llvm::opt::ArgList &Args, @@ -109,7 +109,8 @@ using llvm::opt::ArgStringList; ClangAs(const ToolChain &TC) : Tool("clang::as", "clang integrated assembler", TC, RF_Full) {} - + void AddMIPSTargetArgs(const llvm::opt::ArgList &Args, + llvm::opt::ArgStringList &CmdArgs) const; bool hasGoodDiagnostics() const override { return true; } bool hasIntegratedAssembler() const override { return false; } bool hasIntegratedCPP() const override { return false; } @@ -225,14 +226,21 @@ namespace hexagon { namespace arm { StringRef getARMTargetCPU(const llvm::opt::ArgList &Args, const llvm::Triple &Triple); + const StringRef getARMArch(const llvm::opt::ArgList &Args, + const llvm::Triple &Triple); const char* getARMCPUForMArch(const llvm::opt::ArgList &Args, const llvm::Triple &Triple); - const char* getLLVMArchSuffixForARM(StringRef CPU); + const char* getLLVMArchSuffixForARM(StringRef CPU, StringRef Arch); void appendEBLinkFlags(const llvm::opt::ArgList &Args, ArgStringList &CmdArgs, const llvm::Triple &Triple); } namespace mips { + typedef enum { + NanLegacy = 1, + Nan2008 = 2 + } NanEncoding; + NanEncoding getSupportedNanEncoding(StringRef &CPU); void getMipsCPUAndABI(const llvm::opt::ArgList &Args, const llvm::Triple &Triple, StringRef &CPUName, StringRef &ABIName); @@ -247,6 +255,22 @@ namespace ppc { bool hasPPCAbiArg(const llvm::opt::ArgList &Args, const char *Value); } + /// cloudabi -- Directly call GNU Binutils linker +namespace cloudabi { +class LLVM_LIBRARY_VISIBILITY Link : public GnuTool { +public: + Link(const ToolChain &TC) : GnuTool("cloudabi::Link", "linker", TC) {} + + bool hasIntegratedCPP() const override { return false; } + bool isLinkJob() const override { return true; } + + void ConstructJob(Compilation &C, const JobAction &JA, + const InputInfo &Output, const InputInfoList &Inputs, + const llvm::opt::ArgList &TCArgs, + const char *LinkingOutput) const override; +}; +} // end namespace cloudabi + namespace darwin { llvm::Triple::ArchType getArchTypeForMachOArchName(StringRef Str); void setTripleTypeForMachOArchName(llvm::Triple &T, StringRef Str); @@ -490,6 +514,33 @@ namespace gnutools { const char *LinkingOutput) const override; }; } + +namespace nacltools { + class LLVM_LIBRARY_VISIBILITY AssembleARM : public gnutools::Assemble { + public: + AssembleARM(const ToolChain &TC) : gnutools::Assemble(TC) {} + + void ConstructJob(Compilation &C, const JobAction &JA, + const 
InputInfo &Output, + const InputInfoList &Inputs, + const llvm::opt::ArgList &TCArgs, + const char *LinkingOutput) const override; + }; + class LLVM_LIBRARY_VISIBILITY Link : public Tool { + public: + Link(const ToolChain &TC) : Tool("NaCl::Link", "linker", TC) {} + + bool hasIntegratedCPP() const override { return false; } + bool isLinkJob() const override { return true; } + + void ConstructJob(Compilation &C, const JobAction &JA, + const InputInfo &Output, + const InputInfoList &Inputs, + const llvm::opt::ArgList &TCArgs, + const char *LinkingOutput) const override; + }; +} + /// minix -- Directly call GNU Binutils assembler and linker namespace minix { class LLVM_LIBRARY_VISIBILITY Assemble : public GnuTool { diff --git a/contrib/llvm/tools/clang/lib/Driver/Types.cpp b/contrib/llvm/tools/clang/lib/Driver/Types.cpp index 6ee764c..7b28145 100644 --- a/contrib/llvm/tools/clang/lib/Driver/Types.cpp +++ b/contrib/llvm/tools/clang/lib/Driver/Types.cpp @@ -85,7 +85,7 @@ bool types::isAcceptedByClang(ID Id) { case TY_Asm: case TY_C: case TY_PP_C: case TY_CL: - case TY_CUDA: + case TY_CUDA: case TY_PP_CUDA: case TY_ObjC: case TY_PP_ObjC: case TY_PP_ObjC_Alias: case TY_CXX: case TY_PP_CXX: case TY_ObjCXX: case TY_PP_ObjCXX: case TY_PP_ObjCXX_Alias: @@ -122,7 +122,7 @@ bool types::isCXX(ID Id) { case TY_ObjCXX: case TY_PP_ObjCXX: case TY_PP_ObjCXX_Alias: case TY_CXXHeader: case TY_PP_CXXHeader: case TY_ObjCXXHeader: case TY_PP_ObjCXXHeader: - case TY_CUDA: + case TY_CUDA: case TY_PP_CUDA: return true; } } @@ -153,6 +153,7 @@ types::ID types::lookupTypeForExtension(const char *Ext) { .Case("cl", TY_CL) .Case("cp", TY_CXX) .Case("cu", TY_CUDA) + .Case("cui", TY_PP_CUDA) .Case("hh", TY_CXXHeader) .Case("ll", TY_LLVM_IR) .Case("hpp", TY_CXXHeader) diff --git a/contrib/llvm/tools/clang/lib/Edit/EditedSource.cpp b/contrib/llvm/tools/clang/lib/Edit/EditedSource.cpp index 1c66cb8..e557de9 100644 --- a/contrib/llvm/tools/clang/lib/Edit/EditedSource.cpp +++ b/contrib/llvm/tools/clang/lib/Edit/EditedSource.cpp @@ -135,7 +135,7 @@ bool EditedSource::commitInsertFromRange(SourceLocation OrigLoc, StrVec += text; } - return commitInsert(OrigLoc, Offs, StrVec.str(), beforePreviousInsertions); + return commitInsert(OrigLoc, Offs, StrVec, beforePreviousInsertions); } void EditedSource::commitRemove(SourceLocation OrigLoc, @@ -295,9 +295,11 @@ static void adjustRemoval(const SourceManager &SM, const LangOptions &LangOpts, } if (buffer[end] == ' ') { + assert((end + 1 != buffer.size() || buffer.data()[end + 1] == 0) && + "buffer not zero-terminated!"); if (canRemoveWhitespace(/*left=*/buffer[begin-1], /*beforeWSpace=*/buffer[end-1], - /*right=*/buffer[end+1], + /*right=*/buffer.data()[end + 1], // zero-terminated LangOpts)) ++len; return; @@ -360,14 +362,14 @@ void EditedSource::applyRewrites(EditsReceiver &receiver) { continue; } - applyRewrite(receiver, StrVec.str(), CurOffs, CurLen, SourceMgr, LangOpts); + applyRewrite(receiver, StrVec, CurOffs, CurLen, SourceMgr, LangOpts); CurOffs = offs; StrVec = act.Text; CurLen = act.RemoveLen; CurEnd = CurOffs.getWithOffset(CurLen); } - applyRewrite(receiver, StrVec.str(), CurOffs, CurLen, SourceMgr, LangOpts); + applyRewrite(receiver, StrVec, CurOffs, CurLen, SourceMgr, LangOpts); } void EditedSource::clearRewrites() { diff --git a/contrib/llvm/tools/clang/lib/Format/BreakableToken.cpp b/contrib/llvm/tools/clang/lib/Format/BreakableToken.cpp index 26f1371..66e935a 100644 --- a/contrib/llvm/tools/clang/lib/Format/BreakableToken.cpp +++ 
b/contrib/llvm/tools/clang/lib/Format/BreakableToken.cpp @@ -106,7 +106,7 @@ getStringSplit(StringRef Text, unsigned UsedColumns, unsigned ColumnLimit, Text.substr(0, Advance), UsedColumns + Chars, TabWidth, Encoding); } - if (Chars > MaxSplit || Text.size() == Advance) + if (Chars > MaxSplit || Text.size() <= Advance) break; if (IsBlank(Text[0])) @@ -277,6 +277,8 @@ BreakableBlockComment::BreakableBlockComment( // If the last line is empty, the closing "*/" will have a star. if (i + 1 == e && Lines[i].empty()) break; + if (!Lines[i].empty() && i + 1 != e && Decoration.startswith(Lines[i])) + continue; while (!Lines[i].startswith(Decoration)) Decoration = Decoration.substr(0, Decoration.size() - 1); } @@ -297,14 +299,18 @@ BreakableBlockComment::BreakableBlockComment( } continue; } + // The first line already excludes the star. // For all other lines, adjust the line to exclude the star and // (optionally) the first whitespace. - StartOfLineColumn[i] += Decoration.size(); - Lines[i] = Lines[i].substr(Decoration.size()); - LeadingWhitespace[i] += Decoration.size(); - IndentAtLineBreak = - std::min<int>(IndentAtLineBreak, std::max(0, StartOfLineColumn[i])); + unsigned DecorationSize = + Decoration.startswith(Lines[i]) ? Lines[i].size() : Decoration.size(); + StartOfLineColumn[i] += DecorationSize; + Lines[i] = Lines[i].substr(DecorationSize); + LeadingWhitespace[i] += DecorationSize; + if (!Decoration.startswith(Lines[i])) + IndentAtLineBreak = + std::min<int>(IndentAtLineBreak, std::max(0, StartOfLineColumn[i])); } IndentAtLineBreak = std::max<unsigned>(IndentAtLineBreak, Decoration.size()); DEBUG({ diff --git a/contrib/llvm/tools/clang/lib/Format/ContinuationIndenter.cpp b/contrib/llvm/tools/clang/lib/Format/ContinuationIndenter.cpp index 4cc92b0..4e8f5af 100644 --- a/contrib/llvm/tools/clang/lib/Format/ContinuationIndenter.cpp +++ b/contrib/llvm/tools/clang/lib/Format/ContinuationIndenter.cpp @@ -143,11 +143,10 @@ bool ContinuationIndenter::mustBreak(const LineState &State) { if (Previous.is(tok::semi) && State.LineContainsContinuedForLoopSection) return true; if ((startsNextParameter(Current, Style) || Previous.is(tok::semi) || - (Style.BreakBeforeTernaryOperators && - (Current.is(tok::question) || - (Current.is(TT_ConditionalExpr) && Previous.isNot(tok::question)))) || + (Style.BreakBeforeTernaryOperators && Current.is(TT_ConditionalExpr) && + Previous.isNot(tok::question)) || (!Style.BreakBeforeTernaryOperators && - (Previous.is(tok::question) || Previous.is(TT_ConditionalExpr)))) && + Previous.is(TT_ConditionalExpr))) && State.Stack.back().BreakBeforeParameter && !Current.isTrailingComment() && !Current.isOneOf(tok::r_paren, tok::r_brace)) return true; @@ -166,10 +165,17 @@ bool ContinuationIndenter::mustBreak(const LineState &State) { ((Style.AllowShortFunctionsOnASingleLine != FormatStyle::SFS_All) || Style.BreakConstructorInitializersBeforeComma || Style.ColumnLimit != 0)) return true; + if (Current.is(TT_SelectorName) && State.Stack.back().ObjCSelectorNameFound && + State.Stack.back().BreakBeforeParameter) + return true; if (State.Column < getNewLineColumn(State)) return false; - if (Style.BreakBeforeBinaryOperators == FormatStyle::BOS_None) { + + // Using CanBreakBefore here and below takes care of the decision whether the + // current style uses wrapping before or after operators for the given + // operator. 
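The comment above separates styles that wrap before a binary operator from styles that wrap after it. For contrast, an editorial illustration of the two behaviours (BOS_None appears in this hunk; the All value is assumed from the same BreakBeforeBinaryOperators enum):

// BreakBeforeBinaryOperators: None - the break lands after the operator:
int Sum = LongOperandAaaaaaaaaaaaaaaaaaaaaaaa +
          LongOperandBbbbbbbbbbbbbbbbbbbbbbbb;

// BreakBeforeBinaryOperators: All - the break lands before the operator:
int Sum = LongOperandAaaaaaaaaaaaaaaaaaaaaaaa
          + LongOperandBbbbbbbbbbbbbbbbbbbbbbbb;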
+ if (Previous.is(TT_BinaryOperator) && Current.CanBreakBefore) { // If we need to break somewhere inside the LHS of a binary expression, we // should also break after the operator. Otherwise, the formatting would // hide the operator precedence, e.g. in: @@ -185,16 +191,13 @@ bool ContinuationIndenter::mustBreak(const LineState &State) { Previous.Previous->isNot(TT_BinaryOperator); // For >>. bool LHSIsBinaryExpr = Previous.Previous && Previous.Previous->EndsBinaryExpression; - if (Previous.is(TT_BinaryOperator) && (!IsComparison || LHSIsBinaryExpr) && - Current.isNot(TT_BinaryOperator) && // For >>. - !Current.isTrailingComment() && !Previous.is(tok::lessless) && + if ((!IsComparison || LHSIsBinaryExpr) && !Current.isTrailingComment() && Previous.getPrecedence() != prec::Assignment && State.Stack.back().BreakBeforeParameter) return true; - } else { - if (Current.is(TT_BinaryOperator) && Previous.EndsBinaryExpression && - State.Stack.back().BreakBeforeParameter) - return true; + } else if (Current.is(TT_BinaryOperator) && Current.CanBreakBefore && + State.Stack.back().BreakBeforeParameter) { + return true; } // Same as above, but for the first "<<" operator. @@ -203,12 +206,14 @@ bool ContinuationIndenter::mustBreak(const LineState &State) { State.Stack.back().FirstLessLess == 0) return true; - if (Current.is(TT_SelectorName) && State.Stack.back().ObjCSelectorNameFound && - State.Stack.back().BreakBeforeParameter) - return true; if (Current.NestingLevel == 0 && !Current.isTrailingComment()) { + // Always break after "template <...>" and leading annotations. This is only + // for cases where the entire line does not fit on a single line as a + // different LineFormatter would be used otherwise. if (Previous.ClosesTemplateDeclaration) return true; + if (Previous.is(TT_FunctionAnnotationRParen)) + return true; if (Previous.is(TT_LeadingJavaAnnotation) && Current.isNot(tok::l_paren) && Current.isNot(TT_LeadingJavaAnnotation)) return true; @@ -221,8 +226,7 @@ bool ContinuationIndenter::mustBreak(const LineState &State) { if (startsSegmentOfBuilderTypeCall(Current) && (State.Stack.back().CallContinuation != 0 || - (State.Stack.back().BreakBeforeParameter && - State.Stack.back().ContainsUnwrappedBuilder))) + State.Stack.back().BreakBeforeParameter)) return true; // The following could be precomputed as they do not depend on the state. @@ -232,6 +236,10 @@ bool ContinuationIndenter::mustBreak(const LineState &State) { Previous.is(tok::l_brace) && !Current.isOneOf(tok::r_brace, tok::comment)) return true; + if (Current.is(tok::lessless) && Previous.is(tok::identifier) && + Previous.TokenText == "endl") + return true; + return false; } @@ -245,12 +253,18 @@ unsigned ContinuationIndenter::addTokenToState(LineState &State, bool Newline, (Current.Previous->Tok.getIdentifierInfo() == nullptr || Current.Previous->Tok.getIdentifierInfo()->getPPKeywordID() == tok::pp_not_keyword))) { - // FIXME: Is this correct? - int WhitespaceLength = SourceMgr.getSpellingColumnNumber( - State.NextToken->WhitespaceRange.getEnd()) - - SourceMgr.getSpellingColumnNumber( - State.NextToken->WhitespaceRange.getBegin()); - State.Column += WhitespaceLength; + unsigned EndColumn = + SourceMgr.getSpellingColumnNumber(Current.WhitespaceRange.getEnd()); + if (Current.LastNewlineOffset != 0) { + // If there is a newline within this token, the final column will solely + // be determined by the current end column.
+ State.Column = EndColumn; + } else { + unsigned StartColumn = + SourceMgr.getSpellingColumnNumber(Current.WhitespaceRange.getBegin()); + assert(EndColumn >= StartColumn); + State.Column += EndColumn - StartColumn; + } moveStateToNextToken(State, DryRun, /*Newline=*/false); return 0; } @@ -297,7 +311,9 @@ void ContinuationIndenter::addTokenOnCurrentLine(LineState &State, bool DryRun, else if (State.Stack.back().Indent + Current.LongestObjCSelectorName > State.Column + Spaces + Current.ColumnWidth) State.Stack.back().ColonPos = - State.Stack.back().Indent + Current.LongestObjCSelectorName; + std::max(State.FirstIndent + Style.ContinuationIndentWidth, + State.Stack.back().Indent) + + Current.LongestObjCSelectorName; else State.Stack.back().ColonPos = State.Column + Spaces + Current.ColumnWidth; } @@ -308,9 +324,12 @@ void ContinuationIndenter::addTokenOnCurrentLine(LineState &State, bool DryRun, State.Stack.back().Indent = State.Column + Spaces; if (State.Stack.back().AvoidBinPacking && startsNextParameter(Current, Style)) State.Stack.back().NoLineBreak = true; - if (startsSegmentOfBuilderTypeCall(Current)) + if (startsSegmentOfBuilderTypeCall(Current) && + State.Column > getNewLineColumn(State)) State.Stack.back().ContainsUnwrappedBuilder = true; + if (Current.is(TT_LambdaArrow)) + State.Stack.back().NoLineBreak = true; if (Current.isMemberAccess() && Previous.is(tok::r_paren) && (Previous.MatchingParen && (Previous.TotalLength - Previous.MatchingParen->TotalLength > 10))) { @@ -359,7 +378,7 @@ void ContinuationIndenter::addTokenOnCurrentLine(LineState &State, bool DryRun, const FormatToken *Next = Previous.MatchingParen->getNextNonComment(); HasTrailingCall = Next && Next->isMemberAccess(); } - if (HasTrailingCall && + if (HasTrailingCall && State.Stack.size() > 1 && State.Stack[State.Stack.size() - 2].CallContinuation == 0) State.Stack.back().LastSpace = State.Column; } @@ -406,7 +425,11 @@ unsigned ContinuationIndenter::addTokenOnNewLine(LineState &State, State.Stack.back().AlignColons = false; } else { State.Stack.back().ColonPos = - State.Stack.back().Indent + NextNonComment->LongestObjCSelectorName; + (Style.IndentWrappedFunctionNames + ? std::max(State.Stack.back().Indent, + State.FirstIndent + Style.ContinuationIndentWidth) + : State.Stack.back().Indent) + + NextNonComment->LongestObjCSelectorName; } } else if (State.Stack.back().AlignColons && State.Stack.back().ColonPos <= NextNonComment->ColumnWidth) { @@ -468,8 +491,9 @@ unsigned ContinuationIndenter::addTokenOnNewLine(LineState &State, !PreviousNonComment->isOneOf(tok::comma, tok::semi) && (PreviousNonComment->isNot(TT_TemplateCloser) || Current.NestingLevel != 0) && - !PreviousNonComment->isOneOf(TT_BinaryOperator, TT_JavaAnnotation, - TT_LeadingJavaAnnotation) && + !PreviousNonComment->isOneOf( + TT_BinaryOperator, TT_FunctionAnnotationRParen, TT_JavaAnnotation, + TT_LeadingJavaAnnotation) && Current.isNot(TT_BinaryOperator) && !PreviousNonComment->opensScope()) State.Stack.back().BreakBeforeParameter = true; @@ -516,7 +540,7 @@ unsigned ContinuationIndenter::getNewLineColumn(const LineState &State) { if (NextNonComment->is(tok::l_brace) && NextNonComment->BlockKind == BK_Block) return Current.NestingLevel == 0 ? 
State.FirstIndent : State.Stack.back().Indent; - if (Current.isOneOf(tok::r_brace, tok::r_square)) { + if (Current.isOneOf(tok::r_brace, tok::r_square) && State.Stack.size() > 1) { if (Current.closesBlockTypeList(Style)) return State.Stack[State.Stack.size() - 2].NestedBlockIndent; if (Current.MatchingParen && @@ -529,6 +553,9 @@ unsigned ContinuationIndenter::getNewLineColumn(const LineState &State) { return State.Stack.back().Indent; if (NextNonComment->isStringLiteral() && State.StartOfStringLiteral != 0) return State.StartOfStringLiteral; + if (NextNonComment->is(TT_ObjCStringLiteral) && + State.StartOfStringLiteral != 0) + return State.StartOfStringLiteral - 1; if (NextNonComment->is(tok::lessless) && State.Stack.back().FirstLessLess != 0) return State.Stack.back().FirstLessLess; @@ -546,8 +573,9 @@ unsigned ContinuationIndenter::getNewLineColumn(const LineState &State) { return State.Stack.back().VariablePos; if ((PreviousNonComment && (PreviousNonComment->ClosesTemplateDeclaration || - PreviousNonComment->isOneOf(TT_AttributeParen, TT_JavaAnnotation, - TT_LeadingJavaAnnotation))) || + PreviousNonComment->isOneOf( + TT_AttributeParen, TT_FunctionAnnotationRParen, TT_JavaAnnotation, + TT_LeadingJavaAnnotation))) || (!Style.IndentWrappedFunctionNames && NextNonComment->isOneOf(tok::kw_operator, TT_FunctionDeclarationName))) return std::max(State.Stack.back().LastSpace, State.Stack.back().Indent); @@ -555,7 +583,10 @@ unsigned ContinuationIndenter::getNewLineColumn(const LineState &State) { if (!State.Stack.back().ObjCSelectorNameFound) { if (NextNonComment->LongestObjCSelectorName == 0) return State.Stack.back().Indent; - return State.Stack.back().Indent + + return (Style.IndentWrappedFunctionNames + ? std::max(State.Stack.back().Indent, + State.FirstIndent + Style.ContinuationIndentWidth) + : State.Stack.back().Indent) + NextNonComment->LongestObjCSelectorName - NextNonComment->ColumnWidth; } @@ -570,10 +601,16 @@ unsigned ContinuationIndenter::getNewLineColumn(const LineState &State) { return State.Stack.back().StartOfArraySubscripts; return ContinuationIndent; } - if (NextNonComment->is(TT_StartOfName) || - Previous.isOneOf(tok::coloncolon, tok::equal)) { + + // This ensures that we correctly format ObjC method calls without arguments, + // i.e. where the last element isn't a selector, like: [callee method]; + if (NextNonComment->is(tok::identifier) && NextNonComment->FakeRParens == 0 && + NextNonComment->Next && NextNonComment->Next->is(TT_ObjCMethodExpr)) + return State.Stack.back().Indent; + + if (NextNonComment->isOneOf(TT_StartOfName, TT_PointerOrReference) || + Previous.isOneOf(tok::coloncolon, tok::equal)) return ContinuationIndent; - } if (PreviousNonComment && PreviousNonComment->is(tok::colon) && PreviousNonComment->isOneOf(TT_ObjCMethodExpr, TT_DictLiteral)) return ContinuationIndent; @@ -621,7 +658,7 @@ unsigned ContinuationIndenter::moveStateToNextToken(LineState &State, std::min(State.LowestLevelOnLine, Current.NestingLevel); if (Current.isMemberAccess()) State.Stack.back().StartOfFunctionCall = - Current.LastOperator ? 0 : State.Column + Current.ColumnWidth; + Current.LastOperator ? 
0 : State.Column; if (Current.is(TT_SelectorName)) State.Stack.back().ObjCSelectorNameFound = true; if (Current.is(TT_CtorInitializerColon)) { @@ -637,12 +674,9 @@ unsigned ContinuationIndenter::moveStateToNextToken(LineState &State, State.Stack.back().AvoidBinPacking = true; State.Stack.back().BreakBeforeParameter = false; } - - // In ObjC method declaration we align on the ":" of parameters, but we need - // to ensure that we indent parameters on subsequent lines by at least our - // continuation indent width. - if (Current.is(TT_ObjCMethodSpecifier)) - State.Stack.back().Indent += Style.ContinuationIndentWidth; + if (Current.isOneOf(TT_BinaryOperator, TT_ConditionalExpr) && Newline) + State.Stack.back().NestedBlockIndent = + State.Column + Current.ColumnWidth + 1; // Insert scopes created by fake parenthesis. const FormatToken *Previous = Current.getPreviousNonComment(); @@ -675,12 +709,13 @@ unsigned ContinuationIndenter::moveStateToNextToken(LineState &State, moveStatePastScopeCloser(State); moveStatePastFakeRParens(State); - if (Current.isStringLiteral() && State.StartOfStringLiteral == 0) { + if (Current.isStringLiteral() && State.StartOfStringLiteral == 0) State.StartOfStringLiteral = State.Column; - } else if (!Current.isOneOf(tok::comment, tok::identifier, tok::hash) && - !Current.isStringLiteral()) { + if (Current.is(TT_ObjCStringLiteral) && State.StartOfStringLiteral == 0) + State.StartOfStringLiteral = State.Column + 1; + else if (!Current.isOneOf(tok::comment, tok::identifier, tok::hash) && + !Current.isStringLiteral()) State.StartOfStringLiteral = 0; - } State.Column += Current.ColumnWidth; State.NextToken = State.NextToken->Next; @@ -712,7 +747,8 @@ void ContinuationIndenter::moveStatePastFakeLParens(LineState &State, // 'return', assignments or opening <({[. The indentation for these cases // is special cased. bool SkipFirstExtraIndent = - (Previous && (Previous->opensScope() || Previous->is(tok::kw_return) || + (Previous && (Previous->opensScope() || + Previous->isOneOf(tok::semi, tok::kw_return) || (Previous->getPrecedence() == prec::Assignment && Style.AlignOperands) || Previous->is(TT_ObjCMethodExpr))); @@ -783,7 +819,6 @@ void ContinuationIndenter::moveStatePastFakeLParens(LineState &State, void ContinuationIndenter::moveStatePastFakeRParens(LineState &State) { for (unsigned i = 0, e = State.NextToken->FakeRParens; i != e; ++i) { unsigned VariablePos = State.Stack.back().VariablePos; - assert(State.Stack.size() > 1); if (State.Stack.size() == 1) { // Do not pop the last element. 
break; @@ -806,6 +841,7 @@ void ContinuationIndenter::moveStatePastScopeOpener(LineState &State, unsigned NewIndent; unsigned NewIndentLevel = State.Stack.back().IndentLevel; + unsigned LastSpace = State.Stack.back().LastSpace; bool AvoidBinPacking; bool BreakBeforeParameter = false; if (Current.isOneOf(tok::l_brace, TT_ArrayInitializerLSquare)) { @@ -815,17 +851,28 @@ void ContinuationIndenter::moveStatePastScopeOpener(LineState &State, ++NewIndentLevel; } else { NewIndent = State.Stack.back().LastSpace + Style.ContinuationIndentWidth; - NewIndent = std::min(State.Column + 1, NewIndent); } const FormatToken *NextNoComment = Current.getNextNonComment(); AvoidBinPacking = Current.isOneOf(TT_ArrayInitializerLSquare, TT_DictLiteral) || Style.Language == FormatStyle::LK_Proto || !Style.BinPackArguments || (NextNoComment && NextNoComment->is(TT_DesignatedInitializerPeriod)); } else { NewIndent = Style.ContinuationIndentWidth + std::max(State.Stack.back().LastSpace, State.Stack.back().StartOfFunctionCall); + + // Ensure that different brackets force relative alignment, e.g.: + // void SomeFunction(vector< // break + // int> v); + // FIXME: We likely want to do this for more combinations of brackets. + // Verify that it is wanted for ObjC, too. + if (Current.Tok.getKind() == tok::less && + Current.ParentBracket == tok::l_paren) { + NewIndent = std::max(NewIndent, State.Stack.back().Indent); + LastSpace = std::max(LastSpace, State.Stack.back().Indent); + } + AvoidBinPacking = (State.Line->MustBeDeclaration && !Style.BinPackParameters) || (!State.Line->MustBeDeclaration && !Style.BinPackArguments) || @@ -833,19 +880,33 @@ void ContinuationIndenter::moveStatePastScopeOpener(LineState &State, (Current.PackingKind == PPK_OnePerLine || (!BinPackInconclusiveFunctions && Current.PackingKind == PPK_Inconclusive))); - // If this '[' opens an ObjC call, determine whether all parameters fit - // into one line and put one per line if they don't. - if (Current.is(TT_ObjCMethodExpr) && Style.ColumnLimit != 0 && - getLengthToMatchingParen(Current) + State.Column > + if (Current.is(TT_ObjCMethodExpr) && Current.MatchingParen) { + if (Style.ColumnLimit) { + // If this '[' opens an ObjC call, determine whether all parameters fit + // into one line and put one per line if they don't. + if (getLengthToMatchingParen(Current) + State.Column > getColumnLimit(State)) - BreakBeforeParameter = true; + BreakBeforeParameter = true; + } else { + // For ColumnLimit = 0, we have to figure out whether there is or has to + // be a line break within this call.
+ for (const FormatToken *Tok = &Current; + Tok && Tok != Current.MatchingParen; Tok = Tok->Next) { + if (Tok->MustBreakBefore || + (Tok->CanBreakBefore && Tok->NewlinesBefore > 0)) { + BreakBeforeParameter = true; + break; + } + } + } + } } bool NoLineBreak = State.Stack.back().NoLineBreak || (Current.is(TT_TemplateOpener) && State.Stack.back().ContainsUnwrappedBuilder); - unsigned NestedBlockIndent = State.Stack.back().NestedBlockIndent; - State.Stack.push_back(ParenState(NewIndent, NewIndentLevel, - State.Stack.back().LastSpace, + unsigned NestedBlockIndent = std::max(State.Stack.back().StartOfFunctionCall, + State.Stack.back().NestedBlockIndent); + State.Stack.push_back(ParenState(NewIndent, NewIndentLevel, LastSpace, AvoidBinPacking, NoLineBreak)); State.Stack.back().NestedBlockIndent = NestedBlockIndent; State.Stack.back().BreakBeforeParameter = BreakBeforeParameter; @@ -1082,8 +1143,9 @@ bool ContinuationIndenter::nextIsMultilineString(const LineState &State) { if (Current.getNextNonComment() && Current.getNextNonComment()->isStringLiteral()) return true; // Implicit concatenation. - if (State.Column + Current.ColumnWidth + Current.UnbreakableTailLength > - Style.ColumnLimit) + if (Style.ColumnLimit != 0 && + State.Column + Current.ColumnWidth + Current.UnbreakableTailLength > + Style.ColumnLimit) return true; // String will be split. return false; } diff --git a/contrib/llvm/tools/clang/lib/Format/Format.cpp b/contrib/llvm/tools/clang/lib/Format/Format.cpp index 2a4721f..10c68f9 100644 --- a/contrib/llvm/tools/clang/lib/Format/Format.cpp +++ b/contrib/llvm/tools/clang/lib/Format/Format.cpp @@ -109,10 +109,8 @@ struct ScalarEnumerationTraits<FormatStyle::NamespaceIndentationKind> { } }; -template <> -struct ScalarEnumerationTraits<FormatStyle::PointerAlignmentStyle> { - static void enumeration(IO &IO, - FormatStyle::PointerAlignmentStyle &Value) { +template <> struct ScalarEnumerationTraits<FormatStyle::PointerAlignmentStyle> { + static void enumeration(IO &IO, FormatStyle::PointerAlignmentStyle &Value) { IO.enumCase(Value, "Middle", FormatStyle::PAS_Middle); IO.enumCase(Value, "Left", FormatStyle::PAS_Left); IO.enumCase(Value, "Right", FormatStyle::PAS_Right); @@ -144,8 +142,8 @@ template <> struct MappingTraits<FormatStyle> { IO.mapOptional("Language", Style.Language); if (IO.outputting()) { - StringRef StylesArray[] = { "LLVM", "Google", "Chromium", - "Mozilla", "WebKit", "GNU" }; + StringRef StylesArray[] = {"LLVM", "Google", "Chromium", + "Mozilla", "WebKit", "GNU"}; ArrayRef<StringRef> Styles(StylesArray); for (size_t i = 0, e = Styles.size(); i < e; ++i) { StringRef StyleName(Styles[i]); @@ -176,6 +174,7 @@ template <> struct MappingTraits<FormatStyle> { IO.mapOptional("AlignEscapedNewlinesLeft", Style.AlignEscapedNewlinesLeft); IO.mapOptional("AlignOperands", Style.AlignOperands); IO.mapOptional("AlignTrailingComments", Style.AlignTrailingComments); + IO.mapOptional("AlignConsecutiveAssignments", Style.AlignConsecutiveAssignments); IO.mapOptional("AllowAllParametersOfDeclarationOnNextLine", Style.AllowAllParametersOfDeclarationOnNextLine); IO.mapOptional("AllowShortBlocksOnASingleLine", @@ -273,7 +272,7 @@ template <> struct MappingTraits<FormatStyle> { // will be used to get default values for missing keys. // If the first element has no Language specified, it will be treated as the // default one for the following elements. 
-template <> struct DocumentListTraits<std::vector<FormatStyle> > { +template <> struct DocumentListTraits<std::vector<FormatStyle>> { static size_t size(IO &IO, std::vector<FormatStyle> &Seq) { return Seq.size(); } @@ -331,6 +330,7 @@ FormatStyle getLLVMStyle() { LLVMStyle.AlignAfterOpenBracket = true; LLVMStyle.AlignOperands = true; LLVMStyle.AlignTrailingComments = true; + LLVMStyle.AlignConsecutiveAssignments = false; LLVMStyle.AllowAllParametersOfDeclarationOnNextLine = true; LLVMStyle.AllowShortFunctionsOnASingleLine = FormatStyle::SFS_All; LLVMStyle.AllowShortBlocksOnASingleLine = false; @@ -600,10 +600,10 @@ public: FormatTokenLexer(SourceManager &SourceMgr, FileID ID, FormatStyle &Style, encoding::Encoding Encoding) : FormatTok(nullptr), IsFirstToken(true), GreaterStashed(false), - Column(0), TrailingWhitespace(0), SourceMgr(SourceMgr), ID(ID), - Style(Style), IdentTable(getFormattingLangOpts(Style)), - Keywords(IdentTable), Encoding(Encoding), FirstInLineIndex(0), - FormattingDisabled(false) { + LessStashed(false), Column(0), TrailingWhitespace(0), + SourceMgr(SourceMgr), ID(ID), Style(Style), + IdentTable(getFormattingLangOpts(Style)), Keywords(IdentTable), + Encoding(Encoding), FirstInLineIndex(0), FormattingDisabled(false) { Lex.reset(new Lexer(ID, SourceMgr.getBuffer(ID), SourceMgr, getFormattingLangOpts(Style))); Lex->SetKeepWhitespaceMode(true); @@ -619,7 +619,7 @@ public: do { Tokens.push_back(getNextToken()); tryMergePreviousTokens(); - if (Tokens.back()->NewlinesBefore > 0) + if (Tokens.back()->NewlinesBefore > 0 || Tokens.back()->IsMultiline) FirstInLineIndex = Tokens.size() - 1; } while (Tokens.back()->Tok.isNot(tok::eof)); return Tokens; @@ -633,32 +633,62 @@ private: return; if (tryMergeConflictMarkers()) return; + if (tryMergeLessLess()) + return; if (Style.Language == FormatStyle::LK_JavaScript) { if (tryMergeJSRegexLiteral()) return; if (tryMergeEscapeSequence()) return; + if (tryMergeTemplateString()) + return; - static tok::TokenKind JSIdentity[] = { tok::equalequal, tok::equal }; - static tok::TokenKind JSNotIdentity[] = { tok::exclaimequal, tok::equal }; - static tok::TokenKind JSShiftEqual[] = { tok::greater, tok::greater, - tok::greaterequal }; - static tok::TokenKind JSRightArrow[] = { tok::equal, tok::greater }; - // FIXME: We probably need to change token type to mimic operator with the - // correct priority. - if (tryMergeTokens(JSIdentity)) + static const tok::TokenKind JSIdentity[] = {tok::equalequal, tok::equal}; + static const tok::TokenKind JSNotIdentity[] = {tok::exclaimequal, + tok::equal}; + static const tok::TokenKind JSShiftEqual[] = {tok::greater, tok::greater, + tok::greaterequal}; + static const tok::TokenKind JSRightArrow[] = {tok::equal, tok::greater}; + // FIXME: Investigate what token type gives the correct operator priority. + if (tryMergeTokens(JSIdentity, TT_BinaryOperator)) return; - if (tryMergeTokens(JSNotIdentity)) + if (tryMergeTokens(JSNotIdentity, TT_BinaryOperator)) return; - if (tryMergeTokens(JSShiftEqual)) + if (tryMergeTokens(JSShiftEqual, TT_BinaryOperator)) return; - if (tryMergeTokens(JSRightArrow)) + if (tryMergeTokens(JSRightArrow, TT_JsFatArrow)) return; } } - bool tryMergeTokens(ArrayRef<tok::TokenKind> Kinds) { + bool tryMergeLessLess() { + // Merge X,less,less,Y into X,lessless,Y unless X or Y is less. 
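To make the merge conditions in the implementation that follows concrete, a few editorial examples of token streams and how they are treated (the lexer-level split of "<<" via LessStashed appears later in this hunk):

// "a << b" arrives as two adjacent '<' after the split:  merged back to lessless
// "a < < b" has whitespace between the two '<':           kept as two tok::less
// "<<<" has a neighbouring '<' adjoining the pair:        not merged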
+ if (Tokens.size() < 3) + return false; + + bool FourthTokenIsLess = false; + if (Tokens.size() > 3) + FourthTokenIsLess = (Tokens.end() - 4)[0]->is(tok::less); + + auto First = Tokens.end() - 3; + if (First[2]->is(tok::less) || First[1]->isNot(tok::less) || + First[0]->isNot(tok::less) || FourthTokenIsLess) + return false; + + // Only merge if there currently is no whitespace between the two "<". + if (First[1]->WhitespaceRange.getBegin() != + First[1]->WhitespaceRange.getEnd()) + return false; + + First[0]->Tok.setKind(tok::lessless); + First[0]->TokenText = "<<"; + First[0]->ColumnWidth += 1; + Tokens.erase(Tokens.end() - 2); + return true; + } + + bool tryMergeTokens(ArrayRef<tok::TokenKind> Kinds, TokenType NewType) { if (Tokens.size() < Kinds.size()) return false; @@ -668,8 +698,9 @@ private: return false; unsigned AddLength = 0; for (unsigned i = 1; i < Kinds.size(); ++i) { - if (!First[i]->is(Kinds[i]) || First[i]->WhitespaceRange.getBegin() != - First[i]->WhitespaceRange.getEnd()) + if (!First[i]->is(Kinds[i]) || + First[i]->WhitespaceRange.getBegin() != + First[i]->WhitespaceRange.getEnd()) return false; AddLength += First[i]->TokenText.size(); } @@ -677,6 +708,7 @@ private: First[0]->TokenText = StringRef(First[0]->TokenText.data(), First[0]->TokenText.size() + AddLength); First[0]->ColumnWidth += AddLength; + First[0]->Type = NewType; return true; } @@ -720,7 +752,7 @@ private: unsigned LastColumn = Tokens.back()->OriginalColumn; for (auto I = Tokens.rbegin() + 1, E = Tokens.rend(); I != E; ++I) { ++TokenCount; - if (I[0]->is(tok::slash) && I + 1 != E && + if (I[0]->isOneOf(tok::slash, tok::slashequal) && I + 1 != E && (I[1]->isOneOf(tok::l_paren, tok::semi, tok::l_brace, tok::r_brace, tok::exclaim, tok::l_square, tok::colon, tok::comma, tok::question, tok::kw_return) || @@ -745,6 +777,91 @@ private: return false; } + bool tryMergeTemplateString() { + if (Tokens.size() < 2) + return false; + + FormatToken *EndBacktick = Tokens.back(); + // Backticks get lexed as tok::unknown tokens. If a template string contains + // a comment start, it gets lexed as a tok::comment, or tok::unknown if + // unterminated. + if (!EndBacktick->isOneOf(tok::comment, tok::unknown)) + return false; + size_t CommentBacktickPos = EndBacktick->TokenText.find('`'); + // Unknown token that's not actually a backtick, or a comment that doesn't + // contain a backtick. + if (CommentBacktickPos == StringRef::npos) + return false; + + unsigned TokenCount = 0; + bool IsMultiline = false; + unsigned EndColumnInFirstLine = + EndBacktick->OriginalColumn + EndBacktick->ColumnWidth; + for (auto I = Tokens.rbegin() + 1, E = Tokens.rend(); I != E; I++) { + ++TokenCount; + if (I[0]->NewlinesBefore > 0 || I[0]->IsMultiline) + IsMultiline = true; + + // If there was a preceding template string, this must be the start of a + // template string, not the end. + if (I[0]->is(TT_TemplateString)) + return false; + + if (I[0]->isNot(tok::unknown) || I[0]->TokenText != "`") { + // Keep track of the rhs offset of the last token to wrap across lines - + // it's the rhs offset of the first line of the template string, used to + // determine its width. + if (I[0]->IsMultiline) + EndColumnInFirstLine = I[0]->OriginalColumn + I[0]->ColumnWidth; + // If the token has newlines, the token before it (if it exists) is the + // rhs end of the previous line.
+ if (I[0]->NewlinesBefore > 0 && (I + 1 != E)) + EndColumnInFirstLine = I[1]->OriginalColumn + I[1]->ColumnWidth; + + continue; + } + + Tokens.resize(Tokens.size() - TokenCount); + Tokens.back()->Type = TT_TemplateString; + const char *EndOffset = + EndBacktick->TokenText.data() + 1 + CommentBacktickPos; + if (CommentBacktickPos != 0) { + // If the backtick was not the first character (e.g. in a comment), + // re-lex after the backtick position. + SourceLocation Loc = EndBacktick->Tok.getLocation(); + resetLexer(SourceMgr.getFileOffset(Loc) + CommentBacktickPos + 1); + } + Tokens.back()->TokenText = + StringRef(Tokens.back()->TokenText.data(), + EndOffset - Tokens.back()->TokenText.data()); + + unsigned EndOriginalColumn = EndBacktick->OriginalColumn; + if (EndOriginalColumn == 0) { + SourceLocation Loc = EndBacktick->Tok.getLocation(); + EndOriginalColumn = SourceMgr.getSpellingColumnNumber(Loc); + } + // If the ` is further down within the token (e.g. in a comment). + EndOriginalColumn += CommentBacktickPos; + + if (IsMultiline) { + // ColumnWidth is from backtick to last token in line. + // LastLineColumnWidth is 0 to backtick. + // x = `some content + // until here`; + Tokens.back()->ColumnWidth = + EndColumnInFirstLine - Tokens.back()->OriginalColumn; + Tokens.back()->LastLineColumnWidth = EndOriginalColumn; + Tokens.back()->IsMultiline = true; + } else { + // Token simply spans from start to end, +1 for the ` itself. + Tokens.back()->ColumnWidth = + EndOriginalColumn - Tokens.back()->OriginalColumn + 1; + } + return true; + } + return false; + } + bool tryMerge_TMacro() { if (Tokens.size() < 4) return false; @@ -772,6 +889,8 @@ private: String->OriginalColumn = Macro->OriginalColumn; String->ColumnWidth = encoding::columnWidthWithTabs( String->TokenText, String->OriginalColumn, Style.TabWidth, Encoding); + String->NewlinesBefore = Macro->NewlinesBefore; + String->HasUnescapedNewline = Macro->HasUnescapedNewline; Tokens.pop_back(); Tokens.pop_back(); @@ -842,21 +961,33 @@ private: return false; } + FormatToken *getStashedToken() { + // Create a synthesized second '>' or '<' token. + Token Tok = FormatTok->Tok; + StringRef TokenText = FormatTok->TokenText; + + unsigned OriginalColumn = FormatTok->OriginalColumn; + FormatTok = new (Allocator.Allocate()) FormatToken; + FormatTok->Tok = Tok; + SourceLocation TokLocation = + FormatTok->Tok.getLocation().getLocWithOffset(Tok.getLength() - 1); + FormatTok->Tok.setLocation(TokLocation); + FormatTok->WhitespaceRange = SourceRange(TokLocation, TokLocation); + FormatTok->TokenText = TokenText; + FormatTok->ColumnWidth = 1; + FormatTok->OriginalColumn = OriginalColumn + 1; + + return FormatTok; + } + FormatToken *getNextToken() { if (GreaterStashed) { - // Create a synthesized second '>' token. - // FIXME: Increment Column and set OriginalColumn. - Token Greater = FormatTok->Tok; - FormatTok = new (Allocator.Allocate()) FormatToken; - FormatTok->Tok = Greater; - SourceLocation GreaterLocation = - FormatTok->Tok.getLocation().getLocWithOffset(1); - FormatTok->WhitespaceRange = - SourceRange(GreaterLocation, GreaterLocation); - FormatTok->TokenText = ">"; - FormatTok->ColumnWidth = 1; GreaterStashed = false; - return FormatTok; + return getStashedToken(); + } + if (LessStashed) { + LessStashed = false; + return getStashedToken(); } FormatTok = new (Allocator.Allocate()) FormatToken; @@ -869,20 +1000,32 @@ private: // Consume and record whitespace until we find a significant token. 
unsigned WhitespaceLength = TrailingWhitespace; while (FormatTok->Tok.is(tok::unknown)) { - for (int i = 0, e = FormatTok->TokenText.size(); i != e; ++i) { - switch (FormatTok->TokenText[i]) { + StringRef Text = FormatTok->TokenText; + auto EscapesNewline = [&](int pos) { + // A '\r' here is just part of '\r\n'. Skip it. + if (pos >= 0 && Text[pos] == '\r') + --pos; + // See whether there is an odd number of '\' before this. + unsigned count = 0; + for (; pos >= 0; --pos, ++count) + if (Text[pos] != '\\') + break; + return count & 1; + }; + // FIXME: This miscounts tok::unknown tokens that are not just + // whitespace, e.g. a '`' character. + for (int i = 0, e = Text.size(); i != e; ++i) { + switch (Text[i]) { case '\n': ++FormatTok->NewlinesBefore; - // FIXME: This is technically incorrect, as it could also - // be a literal backslash at the end of the line. - if (i == 0 || (FormatTok->TokenText[i - 1] != '\\' && - (FormatTok->TokenText[i - 1] != '\r' || i == 1 || - FormatTok->TokenText[i - 2] != '\\'))) - FormatTok->HasUnescapedNewline = true; + FormatTok->HasUnescapedNewline = !EscapesNewline(i - 1); FormatTok->LastNewlineOffset = WhitespaceLength + i + 1; Column = 0; break; case '\r': + FormatTok->LastNewlineOffset = WhitespaceLength + i + 1; + Column = 0; + break; case '\f': case '\v': Column = 0; @@ -894,8 +1037,7 @@ private: Column += Style.TabWidth - Column % Style.TabWidth; break; case '\\': - if (i + 1 == e || (FormatTok->TokenText[i + 1] != '\r' && - FormatTok->TokenText[i + 1] != '\n')) + if (i + 1 == e || (Text[i + 1] != '\r' && Text[i + 1] != '\n')) FormatTok->Type = TT_ImplicitStringLiteral; break; default: @@ -920,6 +1062,7 @@ private: FormatTok->TokenText[1] == '\n') { ++FormatTok->NewlinesBefore; WhitespaceLength += 2; + FormatTok->LastNewlineOffset = 2; Column = 0; FormatTok->TokenText = FormatTok->TokenText.substr(2); } @@ -948,6 +1091,10 @@ private: FormatTok->Tok.setKind(tok::greater); FormatTok->TokenText = FormatTok->TokenText.substr(0, 1); GreaterStashed = true; + } else if (FormatTok->Tok.is(tok::lessless)) { + FormatTok->Tok.setKind(tok::less); + FormatTok->TokenText = FormatTok->TokenText.substr(0, 1); + LessStashed = true; } // Now FormatTok is the next non-whitespace token. 
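The EscapesNewline lambda above encodes the rule that a newline is escaped exactly when it is preceded by an odd number of backslashes, after first skipping the '\r' of a "\r\n" pair. A standalone restatement of that rule (hypothetical helper name, same logic as the lambda):

#include "llvm/ADT/StringRef.h"

// True if the newline following position Pos is escaped, i.e. preceded by an
// odd number of '\' characters; a '\r' at Pos is skipped as part of "\r\n".
static bool escapesNewline(llvm::StringRef Text, int Pos) {
  if (Pos >= 0 && Text[Pos] == '\r') // just part of "\r\n"
    --Pos;
  unsigned Count = 0;
  for (; Pos >= 0 && Text[Pos] == '\\'; --Pos)
    ++Count;          // count the run of backslashes
  return Count & 1;   // "\\\n" is escaped, "\\\\\n" is not
}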
@@ -975,16 +1122,16 @@ private: Column = FormatTok->LastLineColumnWidth; } - FormatTok->IsForEachMacro = - std::binary_search(ForEachMacros.begin(), ForEachMacros.end(), - FormatTok->Tok.getIdentifierInfo()); + if (std::find(ForEachMacros.begin(), ForEachMacros.end(), + FormatTok->Tok.getIdentifierInfo()) != ForEachMacros.end()) + FormatTok->Type = TT_ForEachMacro; return FormatTok; } FormatToken *FormatTok; bool IsFirstToken; - bool GreaterStashed; + bool GreaterStashed, LessStashed; unsigned Column; unsigned TrailingWhitespace; std::unique_ptr<Lexer> Lex; @@ -1072,13 +1219,13 @@ public: << "\n"); } - tooling::Replacements format() { + tooling::Replacements format(bool *IncompleteFormat) { tooling::Replacements Result; FormatTokenLexer Tokens(SourceMgr, ID, Style, Encoding); UnwrappedLineParser Parser(Style, Tokens.getKeywords(), Tokens.lex(), *this); - bool StructuralError = Parser.parse(); + Parser.parse(); assert(UnwrappedLines.rbegin()->empty()); for (unsigned Run = 0, RunE = UnwrappedLines.size(); Run + 1 != RunE; ++Run) { @@ -1088,7 +1235,7 @@ public: AnnotatedLines.push_back(new AnnotatedLine(UnwrappedLines[Run][i])); } tooling::Replacements RunResult = - format(AnnotatedLines, StructuralError, Tokens); + format(AnnotatedLines, Tokens, IncompleteFormat); DEBUG({ llvm::dbgs() << "Replacements for run " << Run << ":\n"; for (tooling::Replacements::iterator I = RunResult.begin(), @@ -1107,7 +1254,7 @@ public: } tooling::Replacements format(SmallVectorImpl<AnnotatedLine *> &AnnotatedLines, - bool StructuralError, FormatTokenLexer &Tokens) { + FormatTokenLexer &Tokens, bool *IncompleteFormat) { TokenAnnotator Annotator(Style, Tokens.getKeywords()); for (unsigned i = 0, e = AnnotatedLines.size(); i != e; ++i) { Annotator.annotate(*AnnotatedLines[i]); @@ -1122,8 +1269,9 @@ public: ContinuationIndenter Indenter(Style, Tokens.getKeywords(), SourceMgr, Whitespaces, Encoding, BinPackInconclusiveFunctions); - UnwrappedLineFormatter Formatter(&Indenter, &Whitespaces, Style); - Formatter.format(AnnotatedLines, /*DryRun=*/false); + UnwrappedLineFormatter(&Indenter, &Whitespaces, Style, Tokens.getKeywords(), + IncompleteFormat) + .format(AnnotatedLines); return Whitespaces.generateReplacements(); } @@ -1340,27 +1488,20 @@ private: } // end anonymous namespace -tooling::Replacements reformat(const FormatStyle &Style, Lexer &Lex, - SourceManager &SourceMgr, - ArrayRef<CharSourceRange> Ranges) { - if (Style.DisableFormat) - return tooling::Replacements(); - return reformat(Style, SourceMgr, - SourceMgr.getFileID(Lex.getSourceLocation()), Ranges); -} - tooling::Replacements reformat(const FormatStyle &Style, SourceManager &SourceMgr, FileID ID, - ArrayRef<CharSourceRange> Ranges) { + ArrayRef<CharSourceRange> Ranges, + bool *IncompleteFormat) { if (Style.DisableFormat) return tooling::Replacements(); Formatter formatter(Style, SourceMgr, ID, Ranges); - return formatter.format(); + return formatter.format(IncompleteFormat); } tooling::Replacements reformat(const FormatStyle &Style, StringRef Code, ArrayRef<tooling::Range> Ranges, - StringRef FileName) { + StringRef FileName, + bool *IncompleteFormat) { if (Style.DisableFormat) return tooling::Replacements(); @@ -1383,7 +1524,7 @@ tooling::Replacements reformat(const FormatStyle &Style, StringRef Code, SourceLocation End = Start.getLocWithOffset(Range.getLength()); CharRanges.push_back(CharSourceRange::getCharRange(Start, End)); } - return reformat(Style, SourceMgr, ID, CharRanges); + return reformat(Style, SourceMgr, ID, CharRanges, IncompleteFormat); 
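With the IncompleteFormat out-parameter threaded through the reformat() entry points above, callers can detect ranges the formatter could not fully process. A hedged usage sketch against the signatures in this hunk (Style, Code and Ranges are assumed to be in scope; the file name and message are illustrative):

bool IncompleteFormat = false;
tooling::Replacements Fixes = clang::format::reformat(
    Style, Code, Ranges, "input.cc", &IncompleteFormat);
if (IncompleteFormat)
  llvm::errs() << "warning: some ranges could not be fully formatted\n";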
} LangOptions getFormattingLangOpts(const FormatStyle &Style) { @@ -1392,12 +1533,12 @@ LangOptions getFormattingLangOpts(const FormatStyle &Style) { LangOpts.CPlusPlus11 = Style.Standard == FormatStyle::LS_Cpp03 ? 0 : 1; LangOpts.CPlusPlus14 = Style.Standard == FormatStyle::LS_Cpp03 ? 0 : 1; LangOpts.LineComment = 1; - bool AlternativeOperators = Style.Language != FormatStyle::LK_JavaScript && - Style.Language != FormatStyle::LK_Java; + bool AlternativeOperators = Style.Language == FormatStyle::LK_Cpp; LangOpts.CXXOperatorNames = AlternativeOperators ? 1 : 0; LangOpts.Bool = 1; LangOpts.ObjC1 = 1; LangOpts.ObjC2 = 1; + LangOpts.MicrosoftExt = 1; // To get kw___try, kw___finally. return LangOpts; } @@ -1415,7 +1556,8 @@ const char *StyleOptionHelpDescription = static FormatStyle::LanguageKind getLanguageByFileName(StringRef FileName) { if (FileName.endswith(".java")) { return FormatStyle::LK_Java; - } else if (FileName.endswith_lower(".js")) { + } else if (FileName.endswith_lower(".js") || FileName.endswith_lower(".ts")) { + // JavaScript or TypeScript. return FormatStyle::LK_JavaScript; } else if (FileName.endswith_lower(".proto") || FileName.endswith_lower(".protodevel")) { diff --git a/contrib/llvm/tools/clang/lib/Format/FormatToken.cpp b/contrib/llvm/tools/clang/lib/Format/FormatToken.cpp index badb3a3..88678ca 100644 --- a/contrib/llvm/tools/clang/lib/Format/FormatToken.cpp +++ b/contrib/llvm/tools/clang/lib/Format/FormatToken.cpp @@ -18,6 +18,7 @@ #include "clang/Format/Format.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Support/Debug.h" +#include <climits> namespace clang { namespace format { @@ -59,12 +60,13 @@ void TokenRole::precomputeFormattingInfos(const FormatToken *Token) {} unsigned CommaSeparatedList::formatAfterToken(LineState &State, ContinuationIndenter *Indenter, bool DryRun) { - if (!State.NextToken->Previous || !State.NextToken->Previous->Previous) + if (State.NextToken == nullptr || !State.NextToken->Previous) return 0; // Ensure that we start on the opening brace. - const FormatToken *LBrace = State.NextToken->Previous->Previous; - if (LBrace->isNot(tok::l_brace) || LBrace->BlockKind == BK_Block || + const FormatToken *LBrace = + State.NextToken->Previous->getPreviousNonComment(); + if (!LBrace || LBrace->isNot(tok::l_brace) || LBrace->BlockKind == BK_Block || LBrace->Type == TT_DictLiteral || LBrace->Next->Type == TT_DesignatedInitializerPeriod) return 0; @@ -132,9 +134,9 @@ void CommaSeparatedList::precomputeFormattingInfos(const FormatToken *Token) { return; // In C++11 braced list style, we should not format in columns unless they - // have many items (20 or more) or we allow bin-packing of function - // parameters. - if (Style.Cpp11BracedListStyle && !Style.BinPackParameters && + // have many items (20 or more) or we allow bin-packing of function call + // arguments. + if (Style.Cpp11BracedListStyle && !Style.BinPackArguments && Commas.size() < 19) return; @@ -143,19 +145,21 @@ void CommaSeparatedList::precomputeFormattingInfos(const FormatToken *Token) { return; FormatToken *ItemBegin = Token->Next; + while (ItemBegin->isTrailingComment()) + ItemBegin = ItemBegin->Next; SmallVector<bool, 8> MustBreakBeforeItem; // The lengths of an item if it is put at the end of the line. This includes // trailing comments which are otherwise ignored for column alignment. 
SmallVector<unsigned, 8> EndOfLineItemLength; - unsigned MinItemLength = Style.ColumnLimit; - unsigned MaxItemLength = 0; - + bool HasSeparatingComment = false; for (unsigned i = 0, e = Commas.size() + 1; i != e; ++i) { // Skip comments on their own line. - while (ItemBegin->HasUnescapedNewline && ItemBegin->isTrailingComment()) + while (ItemBegin->HasUnescapedNewline && ItemBegin->isTrailingComment()) { ItemBegin = ItemBegin->Next; + HasSeparatingComment = i > 0; + } MustBreakBeforeItem.push_back(ItemBegin->MustBreakBefore); if (ItemBegin->is(tok::l_brace)) @@ -178,8 +182,6 @@ void CommaSeparatedList::precomputeFormattingInfos(const FormatToken *Token) { ItemEnd = Commas[i]; // The comma is counted as part of the item when calculating the length. ItemLengths.push_back(CodePointsBetween(ItemBegin, ItemEnd)); - MinItemLength = std::min(MinItemLength, ItemLengths.back()); - MaxItemLength = std::max(MaxItemLength, ItemLengths.back()); // Consume trailing comments so they are included in EndOfLineItemLength. if (ItemEnd->Next && !ItemEnd->Next->HasUnescapedNewline && @@ -194,12 +196,9 @@ void CommaSeparatedList::precomputeFormattingInfos(const FormatToken *Token) { ItemBegin = ItemEnd->Next; } - // If this doesn't have a nested list, we require at least 6 elements in order - // create a column layout. If it has a nested list, column layout ensures one - // list element per line. If the difference between the shortest and longest - // element is too large, column layout would create too much whitespace. - if (HasNestedBracedList || Commas.size() < 5 || Token->NestingLevel != 0 || - MaxItemLength - MinItemLength > 10) + // Don't use column layout for nested lists, lists with few elements and in + // the presence of separating comments. + if (Token->NestingLevel != 0 || Commas.size() < 5 || HasSeparatingComment) return; // We can never place more than ColumnLimit / 3 items in a row (because of the @@ -208,6 +207,7 @@ void CommaSeparatedList::precomputeFormattingInfos(const FormatToken *Token) { ColumnFormat Format; Format.Columns = Columns; Format.ColumnSizes.resize(Columns); + std::vector<unsigned> MinSizeInColumn(Columns, UINT_MAX); Format.LineCount = 1; bool HasRowWithSufficientColumns = false; unsigned Column = 0; @@ -219,9 +219,10 @@ void CommaSeparatedList::precomputeFormattingInfos(const FormatToken *Token) { } if (Column == Columns - 1) HasRowWithSufficientColumns = true; - unsigned length = + unsigned Length = (Column == Columns - 1) ? EndOfLineItemLength[i] : ItemLengths[i]; - Format.ColumnSizes[Column] = std::max(Format.ColumnSizes[Column], length); + Format.ColumnSizes[Column] = std::max(Format.ColumnSizes[Column], Length); + MinSizeInColumn[Column] = std::min(MinSizeInColumn[Column], Length); ++Column; } // If all rows are terminated early (e.g. by trailing comments), we don't @@ -229,9 +230,19 @@ void CommaSeparatedList::precomputeFormattingInfos(const FormatToken *Token) { if (!HasRowWithSufficientColumns) break; Format.TotalWidth = Columns - 1; // Width of the N-1 spaces. - for (unsigned i = 0; i < Columns; ++i) { + + for (unsigned i = 0; i < Columns; ++i) Format.TotalWidth += Format.ColumnSizes[i]; - } + + // Don't use this Format if the difference between the longest and shortest + // element in a column exceeds a threshold; that would create excessive spaces. + if ([&] { + for (unsigned i = 0; i < Columns - 1; ++i) + if (Format.ColumnSizes[i] - MinSizeInColumn[i] > 10) + return true; + return false; + }()) + continue; // Ignore layouts that are bound to violate the column limit.
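// A self-contained sketch of the column-slack rule introduced above, assuming
// llvm::ArrayRef and the names of this hunk (the function name itself is
// hypothetical): a candidate layout is rejected when, in any column but the
// last, the widest item exceeds the narrowest by more than 10 columns, since
// aligning them would insert long runs of padding. The last column is exempt
// because its items end the line and need no padding.
static bool hasExcessiveColumnSlack(ArrayRef<unsigned> ColumnSizes,
                                    ArrayRef<unsigned> MinSizeInColumn) {
  assert(ColumnSizes.size() == MinSizeInColumn.size());
  for (unsigned i = 0, e = ColumnSizes.size(); i + 1 < e; ++i)
    if (ColumnSizes[i] - MinSizeInColumn[i] > 10)
      return true; // This layout would be dominated by whitespace.
  return false;
}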
if (Format.TotalWidth > Style.ColumnLimit) diff --git a/contrib/llvm/tools/clang/lib/Format/FormatToken.h b/contrib/llvm/tools/clang/lib/Format/FormatToken.h index 4811e02..ec0fdf4 100644 --- a/contrib/llvm/tools/clang/lib/Format/FormatToken.h +++ b/contrib/llvm/tools/clang/lib/Format/FormatToken.h @@ -41,13 +41,19 @@ enum TokenType { TT_CtorInitializerComma, TT_DesignatedInitializerPeriod, TT_DictLiteral, + TT_ForEachMacro, + TT_FunctionAnnotationRParen, TT_FunctionDeclarationName, TT_FunctionLBrace, TT_FunctionTypeLParen, TT_ImplicitStringLiteral, TT_InheritanceColon, + TT_InlineASMBrace, TT_InlineASMColon, TT_JavaAnnotation, + TT_JsFatArrow, + TT_JsTypeColon, + TT_JsTypeOptionalQuestion, TT_LambdaArrow, TT_LambdaLSquare, TT_LeadingJavaAnnotation, @@ -59,6 +65,7 @@ enum TokenType { TT_ObjCMethodExpr, TT_ObjCMethodSpecifier, TT_ObjCProperty, + TT_ObjCStringLiteral, TT_OverloadedOperator, TT_OverloadedOperatorLParen, TT_PointerOrReference, @@ -69,6 +76,7 @@ enum TokenType { TT_StartOfName, TT_TemplateCloser, TT_TemplateOpener, + TT_TemplateString, TT_TrailingAnnotation, TT_TrailingReturnArrow, TT_TrailingUnaryOperator, @@ -102,21 +110,7 @@ class AnnotatedLine; /// \brief A wrapper around a \c Token storing information about the /// whitespace characters preceding it. struct FormatToken { - FormatToken() - : NewlinesBefore(0), HasUnescapedNewline(false), LastNewlineOffset(0), - ColumnWidth(0), LastLineColumnWidth(0), IsMultiline(false), - IsFirst(false), MustBreakBefore(false), IsUnterminatedLiteral(false), - BlockKind(BK_Unknown), Type(TT_Unknown), SpacesRequiredBefore(0), - CanBreakBefore(false), ClosesTemplateDeclaration(false), - ParameterCount(0), BlockParameterCount(0), - PackingKind(PPK_Inconclusive), TotalLength(0), UnbreakableTailLength(0), - BindingStrength(0), NestingLevel(0), SplitPenalty(0), - LongestObjCSelectorName(0), FakeRParens(0), - StartsBinaryExpression(false), EndsBinaryExpression(false), - OperatorIndex(0), LastOperator(false), - PartOfMultiVariableDeclStmt(false), IsForEachMacro(false), - MatchingParen(nullptr), Previous(nullptr), Next(nullptr), - Decision(FD_Unformatted), Finalized(false) {} + FormatToken() {} /// \brief The \c Token. Token Tok; @@ -125,48 +119,39 @@ struct FormatToken { /// /// This can be used to determine what the user wrote in the original code /// and thereby e.g. leave an empty line between two function definitions. - unsigned NewlinesBefore; + unsigned NewlinesBefore = 0; /// \brief Whether there is at least one unescaped newline before the \c /// Token. - bool HasUnescapedNewline; + bool HasUnescapedNewline = false; /// \brief The range of the whitespace immediately preceding the \c Token. SourceRange WhitespaceRange; /// \brief The offset just past the last '\n' in this token's leading /// whitespace (relative to \c WhiteSpaceStart). 0 if there is no '\n'. - unsigned LastNewlineOffset; + unsigned LastNewlineOffset = 0; /// \brief The width of the non-whitespace parts of the token (or its first /// line for multi-line tokens) in columns. /// We need this to correctly measure number of columns a token spans. - unsigned ColumnWidth; + unsigned ColumnWidth = 0; /// \brief Contains the width in columns of the last line of a multi-line /// token. - unsigned LastLineColumnWidth; + unsigned LastLineColumnWidth = 0; /// \brief Whether the token text contains newlines (escaped or not). - bool IsMultiline; + bool IsMultiline = false; /// \brief Indicates that this is the first token. 
- bool IsFirst; + bool IsFirst = false; /// \brief Whether there must be a line break before this token. /// /// This happens for example when a preprocessor directive ended directly /// before the token. - bool MustBreakBefore; - - /// \brief Returns actual token start location without leading escaped - /// newlines and whitespace. - /// - /// This can be different to Tok.getLocation(), which includes leading escaped - /// newlines. - SourceLocation getStartOfNonWhitespace() const { - return WhitespaceRange.getEnd(); - } + bool MustBreakBefore = false; /// \brief The raw text of the token. /// @@ -175,69 +160,74 @@ struct FormatToken { StringRef TokenText; /// \brief Set to \c true if this token is an unterminated literal. - bool IsUnterminatedLiteral; + bool IsUnterminatedLiteral = 0; /// \brief Contains the kind of block if this token is a brace. - BraceBlockKind BlockKind; + BraceBlockKind BlockKind = BK_Unknown; - TokenType Type; + TokenType Type = TT_Unknown; /// \brief The number of spaces that should be inserted before this token. - unsigned SpacesRequiredBefore; + unsigned SpacesRequiredBefore = 0; /// \brief \c true if it is allowed to break before this token. - bool CanBreakBefore; + bool CanBreakBefore = false; - bool ClosesTemplateDeclaration; + /// \brief \c true if this is the ">" of "template<..>". + bool ClosesTemplateDeclaration = false; /// \brief Number of parameters, if this is "(", "[" or "<". /// /// This is initialized to 1 as we don't need to distinguish functions with /// 0 parameters from functions with 1 parameter. Thus, we can simply count /// the number of commas. - unsigned ParameterCount; + unsigned ParameterCount = 0; /// \brief Number of parameters that are nested blocks, /// if this is "(", "[" or "<". - unsigned BlockParameterCount; + unsigned BlockParameterCount = 0; + + /// \brief If this is a bracket ("<", "(", "[" or "{"), contains the kind of + /// the surrounding bracket. + tok::TokenKind ParentBracket = tok::unknown; /// \brief A token can have a special role that can carry extra information /// about the token's formatting. std::unique_ptr<TokenRole> Role; /// \brief If this is an opening parenthesis, how are the parameters packed? - ParameterPackingKind PackingKind; + ParameterPackingKind PackingKind = PPK_Inconclusive; /// \brief The total length of the unwrapped line up to and including this /// token. - unsigned TotalLength; + unsigned TotalLength = 0; /// \brief The original 0-based column of this token, including expanded tabs. /// The configured TabWidth is used as tab width. - unsigned OriginalColumn; + unsigned OriginalColumn = 0; /// \brief The length of following tokens until the next natural split point, /// or the next token that can be broken. - unsigned UnbreakableTailLength; + unsigned UnbreakableTailLength = 0; // FIXME: Come up with a 'cleaner' concept. /// \brief The binding strength of a token. This is a combined value of /// operator precedence, parenthesis nesting, etc. - unsigned BindingStrength; + unsigned BindingStrength = 0; /// \brief The nesting level of this token, i.e. the number of surrounding (), /// [], {} or <>. - unsigned NestingLevel; + unsigned NestingLevel = 0; /// \brief Penalty for inserting a line break before this token. - unsigned SplitPenalty; + unsigned SplitPenalty = 0; /// \brief If this is the first ObjC selector name in an ObjC method /// definition or call, this contains the length of the longest name. /// /// This being set to 0 means that the selectors should not be colon-aligned, /// e.g. 
because several of them are block-type. - unsigned LongestObjCSelectorName; + unsigned LongestObjCSelectorName = 0; /// \brief Stores the number of required fake parentheses and the /// corresponding operator precedence. @@ -246,29 +236,47 @@ struct FormatToken { /// reverse order, i.e. inner fake parentheses first. SmallVector<prec::Level, 4> FakeLParens; /// \brief Insert this many fake ) after this token for correct indentation. - unsigned FakeRParens; + unsigned FakeRParens = 0; /// \brief \c true if this token starts a binary expression, i.e. has at least /// one fake l_paren with a precedence greater than prec::Unknown. - bool StartsBinaryExpression; + bool StartsBinaryExpression = false; /// \brief \c true if this token ends a binary expression. - bool EndsBinaryExpression; + bool EndsBinaryExpression = false; /// \brief If this is an operator (or "."/"->") in a sequence of operators /// with the same precedence, contains the 0-based operator index. - unsigned OperatorIndex; + unsigned OperatorIndex = 0; /// \brief Is this the last operator (or "."/"->") in a sequence of operators /// with the same precedence? - bool LastOperator; + bool LastOperator = false; /// \brief Is this token part of a \c DeclStmt defining multiple variables? /// /// Only set if \c Type == \c TT_StartOfName. - bool PartOfMultiVariableDeclStmt; + bool PartOfMultiVariableDeclStmt = false; + + /// \brief If this is a bracket, this points to the matching one. + FormatToken *MatchingParen = nullptr; + + /// \brief The previous token in the unwrapped line. + FormatToken *Previous = nullptr; - /// \brief Is this a foreach macro? - bool IsForEachMacro; + /// \brief The next token in the unwrapped line. + FormatToken *Next = nullptr; + + /// \brief If this token starts a block, this contains all the unwrapped lines + /// in it. + SmallVector<AnnotatedLine *, 1> Children; + + /// \brief Stores the formatting decision for the token once it was made. + FormatDecision Decision = FD_Unformatted; + + /// \brief If \c true, this token has been fully formatted (indented and + /// potentially re-formatted inside), and we do not allow further formatting + /// changes. + bool Finalized = false; bool is(tok::TokenKind Kind) const { return Tok.is(Kind); } bool is(TokenType TT) const { return Type == TT; } @@ -278,27 +286,10 @@ struct FormatToken { template <typename A, typename B> bool isOneOf(A K1, B K2) const { return is(K1) || is(K2); } - template <typename A, typename B, typename C> - bool isOneOf(A K1, B K2, C K3) const { - return is(K1) || is(K2) || is(K3); - } - template <typename A, typename B, typename C, typename D> - bool isOneOf(A K1, B K2, C K3, D K4) const { - return is(K1) || is(K2) || is(K3) || is(K4); + template <typename A, typename B, typename... Ts> + bool isOneOf(A K1, B K2, Ts...
Ks) const { + return is(K1) || isOneOf(K2, Ks...); } - template <typename A, typename B, typename C, typename D, typename E> - bool isOneOf(A K1, B K2, C K3, D K4, E K5) const { - return is(K1) || is(K2) || is(K3) || is(K4) || is(K5); - } - template <typename T> - bool isOneOf(T K1, T K2, T K3, T K4, T K5, T K6, T K7 = tok::NUM_TOKENS, - T K8 = tok::NUM_TOKENS, T K9 = tok::NUM_TOKENS, - T K10 = tok::NUM_TOKENS, T K11 = tok::NUM_TOKENS, - T K12 = tok::NUM_TOKENS) const { - return is(K1) || is(K2) || is(K3) || is(K4) || is(K5) || is(K6) || is(K7) || - is(K8) || is(K9) || is(K10) || is(K11) || is(K12); - } - template <typename T> bool isNot(T Kind) const { return !is(Kind); } bool isStringLiteral() const { return tok::isStringLiteral(Tok.getKind()); } @@ -336,7 +327,8 @@ struct FormatToken { /// \brief Returns \c true if this is a "." or "->" accessing a member. bool isMemberAccess() const { return isOneOf(tok::arrow, tok::period, tok::arrowstar) && - !isOneOf(TT_DesignatedInitializerPeriod, TT_TrailingReturnArrow); + !isOneOf(TT_DesignatedInitializerPeriod, TT_TrailingReturnArrow, + TT_LambdaArrow); } bool isUnaryOperator() const { @@ -385,6 +377,15 @@ struct FormatToken { } } + /// \brief Returns actual token start location without leading escaped + /// newlines and whitespace. + /// + /// This can be different to Tok.getLocation(), which includes leading escaped + /// newlines. + SourceLocation getStartOfNonWhitespace() const { + return WhitespaceRange.getEnd(); + } + prec::Level getPrecedence() const { return getBinOpPrecedence(Tok.getKind(), true, true); } @@ -419,25 +420,10 @@ struct FormatToken { return MatchingParen && MatchingParen->opensBlockTypeList(Style); } - FormatToken *MatchingParen; - - FormatToken *Previous; - FormatToken *Next; - - SmallVector<AnnotatedLine *, 1> Children; - - /// \brief Stores the formatting decision for the token once it was made. - FormatDecision Decision; - - /// \brief If \c true, this token has been fully formatted (indented and - /// potentially re-formatted inside), and we do not allow further formatting - /// changes. - bool Finalized; - private: // Disallow copying. - FormatToken(const FormatToken &) LLVM_DELETED_FUNCTION; - void operator=(const FormatToken &) LLVM_DELETED_FUNCTION; + FormatToken(const FormatToken &) = delete; + void operator=(const FormatToken &) = delete; }; class ContinuationIndenter; @@ -543,6 +529,7 @@ struct AdditionalKeywords { kw_finally = &IdentTable.get("finally"); kw_function = &IdentTable.get("function"); + kw_import = &IdentTable.get("import"); kw_var = &IdentTable.get("var"); kw_abstract = &IdentTable.get("abstract"); @@ -555,24 +542,33 @@ struct AdditionalKeywords { kw_package = &IdentTable.get("package"); kw_synchronized = &IdentTable.get("synchronized"); kw_throws = &IdentTable.get("throws"); + kw___except = &IdentTable.get("__except"); + + kw_mark = &IdentTable.get("mark"); kw_option = &IdentTable.get("option"); kw_optional = &IdentTable.get("optional"); kw_repeated = &IdentTable.get("repeated"); kw_required = &IdentTable.get("required"); kw_returns = &IdentTable.get("returns"); + + kw_signals = &IdentTable.get("signals"); + kw_slots = &IdentTable.get("slots"); + kw_qslots = &IdentTable.get("Q_SLOTS"); } - // ObjC context sensitive keywords. + // Context sensitive keywords. IdentifierInfo *kw_in; IdentifierInfo *kw_CF_ENUM; IdentifierInfo *kw_CF_OPTIONS; IdentifierInfo *kw_NS_ENUM; IdentifierInfo *kw_NS_OPTIONS; + IdentifierInfo *kw___except; // JavaScript keywords. 
IdentifierInfo *kw_finally; IdentifierInfo *kw_function; + IdentifierInfo *kw_import; IdentifierInfo *kw_var; // Java keywords. @@ -587,12 +583,20 @@ struct AdditionalKeywords { IdentifierInfo *kw_synchronized; IdentifierInfo *kw_throws; + // Pragma keywords. + IdentifierInfo *kw_mark; + // Proto keywords. IdentifierInfo *kw_option; IdentifierInfo *kw_optional; IdentifierInfo *kw_repeated; IdentifierInfo *kw_required; IdentifierInfo *kw_returns; + + // QT keywords. + IdentifierInfo *kw_signals; + IdentifierInfo *kw_slots; + IdentifierInfo *kw_qslots; }; } // namespace format diff --git a/contrib/llvm/tools/clang/lib/Format/TokenAnnotator.cpp b/contrib/llvm/tools/clang/lib/Format/TokenAnnotator.cpp index 4ba3f91..98f5709 100644 --- a/contrib/llvm/tools/clang/lib/Format/TokenAnnotator.cpp +++ b/contrib/llvm/tools/clang/lib/Format/TokenAnnotator.cpp @@ -15,6 +15,7 @@ #include "TokenAnnotator.h" #include "clang/Basic/SourceManager.h" +#include "llvm/ADT/SmallPtrSet.h" #include "llvm/Support/Debug.h" #define DEBUG_TYPE "format-token-annotator" @@ -43,8 +44,14 @@ private: bool parseAngle() { if (!CurrentToken) return false; - ScopedContextCreator ContextCreator(*this, tok::less, 10); FormatToken *Left = CurrentToken->Previous; + Left->ParentBracket = Contexts.back().ContextKind; + ScopedContextCreator ContextCreator(*this, tok::less, 10); + + // If this angle is in the context of an expression, we need to be more + // hesitant to detect it as opening template parameters. + bool InExprContext = Contexts.back().IsExpression; + Contexts.back().IsExpression = false; // If there's a template keyword before the opening angle bracket, this is a // template parameter, not an argument. @@ -68,8 +75,8 @@ private: next(); continue; } - if (CurrentToken->isOneOf(tok::r_paren, tok::r_square, tok::r_brace, - tok::colon, tok::question)) + if (CurrentToken->isOneOf(tok::r_paren, tok::r_square, tok::r_brace) || + (CurrentToken->isOneOf(tok::colon, tok::question) && InExprContext)) return false; // If a && or || is found and interpreted as a binary operator, this set // of angles is likely part of something like "a < b && c > d". If the @@ -92,6 +99,8 @@ private: bool parseParens(bool LookForDecls = false) { if (!CurrentToken) return false; + FormatToken *Left = CurrentToken->Previous; + Left->ParentBracket = Contexts.back().ContextKind; ScopedContextCreator ContextCreator(*this, tok::l_paren, 1); // FIXME: This is a bit of a hack. Do better. @@ -99,7 +108,6 @@ private: Contexts.size() == 2 && Contexts[0].ColonIsForRangeExpr; bool StartsObjCMethodExpr = false; - FormatToken *Left = CurrentToken->Previous; if (CurrentToken->is(tok::caret)) { // (^ can start a block type. Left->Type = TT_ObjCBlockLParen; @@ -117,22 +125,22 @@ private: Left->Previous->is(TT_BinaryOperator))) { // static_assert, if and while usually contain expressions. Contexts.back().IsExpression = true; - } else if (Line.InPPDirective && - (!Left->Previous || - !Left->Previous->isOneOf(tok::identifier, - TT_OverloadedOperator))) { - Contexts.back().IsExpression = true; } else if (Left->Previous && Left->Previous->is(tok::r_square) && Left->Previous->MatchingParen && Left->Previous->MatchingParen->is(TT_LambdaLSquare)) { // This is a parameter list of a lambda expression. 
Contexts.back().IsExpression = false; + } else if (Line.InPPDirective && + (!Left->Previous || + !Left->Previous->isOneOf(tok::identifier, + TT_OverloadedOperator))) { + Contexts.back().IsExpression = true; } else if (Contexts[Contexts.size() - 2].CaretFound) { // This is the parameter list of an ObjC block. Contexts.back().IsExpression = false; } else if (Left->Previous && Left->Previous->is(tok::kw___attribute)) { Left->Type = TT_AttributeParen; - } else if (Left->Previous && Left->Previous->IsForEachMacro) { + } else if (Left->Previous && Left->Previous->is(TT_ForEachMacro)) { // The first argument to a foreach macro is a declaration. Contexts.back().IsForEachMacro = true; Contexts.back().IsExpression = false; @@ -149,6 +157,8 @@ private: bool MightBeFunctionType = CurrentToken->is(tok::star); bool HasMultipleLines = false; bool HasMultipleParametersOnALine = false; + bool MightBeObjCForRangeLoop = + Left->Previous && Left->Previous->is(tok::kw_for); while (CurrentToken) { // LookForDecls is set when "if (" has been seen. Check for // 'identifier' '*' 'identifier' followed by not '=' -- this @@ -210,7 +220,8 @@ private: } if (CurrentToken->isOneOf(tok::r_square, tok::r_brace)) return false; - else if (CurrentToken->is(tok::l_brace)) + + if (CurrentToken->is(tok::l_brace)) Left->Type = TT_Unknown; // Not TT_ObjCBlockLParen if (CurrentToken->is(tok::comma) && CurrentToken->Next && !CurrentToken->Next->HasUnescapedNewline && @@ -219,6 +230,15 @@ private: if (CurrentToken->isOneOf(tok::kw_const, tok::kw_auto) || CurrentToken->isSimpleTypeSpecifier()) Contexts.back().IsExpression = false; + if (CurrentToken->isOneOf(tok::semi, tok::colon)) + MightBeObjCForRangeLoop = false; + if (MightBeObjCForRangeLoop && CurrentToken->is(Keywords.kw_in)) + CurrentToken->Type = TT_ObjCForIn; + // When we discover a 'new', we set CanBeExpression to 'false' in order to + // parse the type correctly. Reset that after a comma. + if (CurrentToken->is(tok::comma)) + Contexts.back().CanBeExpression = true; + FormatToken *Tok = CurrentToken; if (!consumeToken()) return false; @@ -237,6 +257,7 @@ private: // ')' or ']'), it could be the start of an Objective-C method // expression, or it could be the start of an Objective-C array literal. FormatToken *Left = CurrentToken->Previous; + Left->ParentBracket = Contexts.back().ContextKind; FormatToken *Parent = Left->getPreviousNonComment(); bool StartsObjCMethodExpr = Contexts.back().CanBeExpression && Left->isNot(TT_LambdaLSquare) && @@ -316,6 +337,7 @@ private: bool parseBrace() { if (CurrentToken) { FormatToken *Left = CurrentToken->Previous; + Left->ParentBracket = Contexts.back().ContextKind; if (Contexts.back().CaretFound) Left->Type = TT_ObjCBlockLBrace; @@ -342,7 +364,8 @@ private: Style.Language == FormatStyle::LK_Proto) && Previous->is(tok::identifier)) Previous->Type = TT_SelectorName; - if (CurrentToken->is(tok::colon)) + if (CurrentToken->is(tok::colon) || + Style.Language == FormatStyle::LK_JavaScript) Left->Type = TT_DictLiteral; } if (!consumeToken()) @@ -408,10 +431,18 @@ private: if (!Tok->Previous) return false; // Colons from ?: are handled in parseConditional(). 
- if (Tok->Previous->is(tok::r_paren) && Contexts.size() == 1 && - Line.First->isNot(tok::kw_case)) { - Tok->Type = TT_CtorInitializerColon; - } else if (Contexts.back().ColonIsDictLiteral) { + if (Style.Language == FormatStyle::LK_JavaScript) { + if (Contexts.back().ColonIsForRangeExpr || // colon in for loop + (Contexts.size() == 1 && // switch/case labels + !Line.First->isOneOf(tok::kw_enum, tok::kw_case)) || + Contexts.back().ContextKind == tok::l_paren || // function params + Contexts.back().ContextKind == tok::l_square || // array type + Line.MustBeDeclaration) { // method/property declaration + Tok->Type = TT_JsTypeColon; + break; + } + } + if (Contexts.back().ColonIsDictLiteral) { Tok->Type = TT_DictLiteral; } else if (Contexts.back().ColonIsObjCMethodExpr || Line.First->is(TT_ObjCMethodSpecifier)) { @@ -429,7 +460,10 @@ private: Tok->Type = TT_BitFieldColon; } else if (Contexts.size() == 1 && !Line.First->isOneOf(tok::kw_enum, tok::kw_case)) { - Tok->Type = TT_InheritanceColon; + if (Tok->Previous->is(tok::r_paren)) + Tok->Type = TT_CtorInitializerColon; + else + Tok->Type = TT_InheritanceColon; } else if (Tok->Previous->is(tok::identifier) && Tok->Next && Tok->Next->isOneOf(tok::r_paren, tok::comma)) { // This handles a special macro in ObjC code where selectors including @@ -471,13 +505,15 @@ private: return false; break; case tok::less: - if ((!Tok->Previous || + if (!NonTemplateLess.count(Tok) && + (!Tok->Previous || (!Tok->Previous->Tok.isLiteral() && !(Tok->Previous->is(tok::r_paren) && Contexts.size() > 1))) && parseAngle()) { Tok->Type = TT_TemplateOpener; } else { Tok->Type = TT_BinaryOperator; + NonTemplateLess.insert(Tok); CurrentToken = Tok; next(); } @@ -509,21 +545,34 @@ private: } break; case tok::question: + if (Style.Language == FormatStyle::LK_JavaScript && Tok->Next && + Tok->Next->isOneOf(tok::semi, tok::colon, tok::r_paren, + tok::r_brace)) { + // Question marks before semicolons, colons, etc. indicate optional + // types (fields, parameters), e.g. + // function(x?: string, y?) {...} + // class X { y?; } + Tok->Type = TT_JsTypeOptionalQuestion; + break; + } + // Declarations cannot be conditional expressions; this can only be part + // of a type declaration. + if (Line.MustBeDeclaration && + Style.Language == FormatStyle::LK_JavaScript) + break; parseConditional(); break; case tok::kw_template: parseTemplateDeclaration(); break; - case tok::identifier: - if (Line.First->is(tok::kw_for) && Tok->is(Keywords.kw_in) && - Tok->Previous->isNot(tok::colon)) - Tok->Type = TT_ObjCForIn; - break; case tok::comma: - if (Contexts.back().FirstStartOfName) - Contexts.back().FirstStartOfName->PartOfMultiVariableDeclStmt = true; if (Contexts.back().InCtorInitializer) Tok->Type = TT_CtorInitializerComma; + else if (Contexts.back().FirstStartOfName && + (Contexts.size() == 1 || Line.First->is(tok::kw_for))) { + Contexts.back().FirstStartOfName->PartOfMultiVariableDeclStmt = true; + Line.IsMultiVariableDeclStmt = true; + } if (Contexts.back().IsForEachMacro) Contexts.back().IsExpression = true; break; @@ -557,11 +606,14 @@ private: void parsePragma() { next(); // Consume "pragma". - if (CurrentToken && CurrentToken->TokenText == "mark") { + if (CurrentToken && + CurrentToken->isOneOf(Keywords.kw_mark, Keywords.kw_option)) { + bool IsMark = CurrentToken->is(Keywords.kw_mark); next(); // Consume "mark". next(); // Consume first token (so we fix leading whitespace). 
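// Hedged illustration of the TT_JsTypeColon cases handled above (sample
// inputs are assumptions in the style of the FormatTestJS unit tests, not
// text from this patch); no space is inserted before such colons:
//   function x(y: string): void {}   // parameter list and return type
//   var x: number;                   // declaration with type annotation
//   for (var y: string in pairs) {}  // colon inside a for loop header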
while (CurrentToken) { - CurrentToken->Type = TT_ImplicitStringLiteral; + if (IsMark || CurrentToken->Previous->is(TT_BinaryOperator)) + CurrentToken->Type = TT_ImplicitStringLiteral; next(); } } @@ -582,6 +634,7 @@ private: return Type; switch (CurrentToken->Tok.getIdentifierInfo()->getPPKeywordID()) { case tok::pp_include: + case tok::pp_include_next: case tok::pp_import: next(); parseIncludeDirective(); @@ -609,9 +662,9 @@ private: public: LineType parseLine() { - if (CurrentToken->is(tok::hash)) { + NonTemplateLess.clear(); + if (CurrentToken->is(tok::hash)) return parsePreprocessorDirective(); - } // Directly allow to 'import <string-literal>' to support protocol buffer // definitions (code.google.com/p/protobuf) or missing "#" (either way we @@ -635,6 +688,15 @@ public: return LT_ImportStatement; } + // In .proto files, top-level options are very similar to import statements + // and should not be line-wrapped. + if (Style.Language == FormatStyle::LK_Proto && Line.Level == 0 && + CurrentToken->is(Keywords.kw_option)) { + next(); + if (CurrentToken && CurrentToken->is(tok::identifier)) + return LT_ImportStatement; + } + bool KeywordVirtualFound = false; bool ImportStatement = false; while (CurrentToken) { @@ -678,11 +740,13 @@ private: // Reset token type in case we have already looked at it and then // recovered from an error (e.g. failure to find the matching >). - if (!CurrentToken->isOneOf(TT_LambdaLSquare, TT_FunctionLBrace, - TT_ImplicitStringLiteral, TT_RegexLiteral, + if (!CurrentToken->isOneOf(TT_LambdaLSquare, TT_ForEachMacro, + TT_FunctionLBrace, TT_ImplicitStringLiteral, + TT_InlineASMBrace, TT_RegexLiteral, TT_TrailingReturnArrow)) CurrentToken->Type = TT_Unknown; CurrentToken->Role.reset(); + CurrentToken->MatchingParen = nullptr; CurrentToken->FakeLParens.clear(); CurrentToken->FakeRParens = 0; } @@ -705,27 +769,22 @@ private: Context(tok::TokenKind ContextKind, unsigned BindingStrength, bool IsExpression) : ContextKind(ContextKind), BindingStrength(BindingStrength), - LongestObjCSelectorName(0), ColonIsForRangeExpr(false), - ColonIsDictLiteral(false), ColonIsObjCMethodExpr(false), - FirstObjCSelectorName(nullptr), FirstStartOfName(nullptr), - IsExpression(IsExpression), CanBeExpression(true), - InTemplateArgument(false), InCtorInitializer(false), - CaretFound(false), IsForEachMacro(false) {} + IsExpression(IsExpression) {} tok::TokenKind ContextKind; unsigned BindingStrength; - unsigned LongestObjCSelectorName; - bool ColonIsForRangeExpr; - bool ColonIsDictLiteral; - bool ColonIsObjCMethodExpr; - FormatToken *FirstObjCSelectorName; - FormatToken *FirstStartOfName; bool IsExpression; - bool CanBeExpression; - bool InTemplateArgument; - bool InCtorInitializer; - bool CaretFound; - bool IsForEachMacro; + unsigned LongestObjCSelectorName = 0; + bool ColonIsForRangeExpr = false; + bool ColonIsDictLiteral = false; + bool ColonIsObjCMethodExpr = false; + FormatToken *FirstObjCSelectorName = nullptr; + FormatToken *FirstStartOfName = nullptr; + bool CanBeExpression = true; + bool InTemplateArgument = false; + bool InCtorInitializer = false; + bool CaretFound = false; + bool IsForEachMacro = false; }; /// \brief Puts a new \c Context onto the stack \c Contexts for the lifetime @@ -746,23 +805,29 @@ private: void modifyContext(const FormatToken &Current) { if (Current.getPrecedence() == prec::Assignment && - !Line.First->isOneOf(tok::kw_template, tok::kw_using, - TT_UnaryOperator) && + !Line.First->isOneOf(tok::kw_template, tok::kw_using) && (!Current.Previous || 
Current.Previous->isNot(tok::kw_operator))) { Contexts.back().IsExpression = true; - for (FormatToken *Previous = Current.Previous; - Previous && !Previous->isOneOf(tok::comma, tok::semi); - Previous = Previous->Previous) { - if (Previous->isOneOf(tok::r_square, tok::r_paren)) { - Previous = Previous->MatchingParen; - if (!Previous) + if (!Line.First->is(TT_UnaryOperator)) { + for (FormatToken *Previous = Current.Previous; + Previous && !Previous->isOneOf(tok::comma, tok::semi); + Previous = Previous->Previous) { + if (Previous->isOneOf(tok::r_square, tok::r_paren)) { + Previous = Previous->MatchingParen; + if (!Previous) + break; + } + if (Previous->opensScope()) break; + if (Previous->isOneOf(TT_BinaryOperator, TT_UnaryOperator) && + Previous->isOneOf(tok::star, tok::amp, tok::ampamp) && + Previous->Previous && Previous->Previous->isNot(tok::equal)) + Previous->Type = TT_PointerOrReference; } - if (Previous->isOneOf(TT_BinaryOperator, TT_UnaryOperator) && - Previous->isOneOf(tok::star, tok::amp) && Previous->Previous && - Previous->Previous->isNot(tok::equal)) - Previous->Type = TT_PointerOrReference; } + } else if (Current.is(tok::lessless) && + (!Current.Previous || !Current.Previous->is(tok::kw_operator))) { + Contexts.back().IsExpression = true; } else if (Current.isOneOf(tok::kw_return, tok::kw_throw)) { Contexts.back().IsExpression = true; } else if (Current.is(TT_TrailingReturnArrow)) { @@ -833,30 +898,56 @@ private: } else if (Current.isOneOf(tok::exclaim, tok::tilde)) { Current.Type = TT_UnaryOperator; } else if (Current.is(tok::question)) { - Current.Type = TT_ConditionalExpr; + if (Style.Language == FormatStyle::LK_JavaScript && + Line.MustBeDeclaration) { + // In JavaScript, `interface X { foo?(): bar; }` is an optional method + // on the interface, not a ternary expression. + Current.Type = TT_JsTypeOptionalQuestion; + } else { + Current.Type = TT_ConditionalExpr; + } } else if (Current.isBinaryOperator() && (!Current.Previous || Current.Previous->isNot(tok::l_square))) { Current.Type = TT_BinaryOperator; } else if (Current.is(tok::comment)) { - if (Current.TokenText.startswith("//")) + if (Current.TokenText.startswith("/*")) { + if (Current.TokenText.endswith("*/")) + Current.Type = TT_BlockComment; + else + // The lexer has for some reason determined a comment here. But we + // cannot really handle it, if it isn't properly terminated. 
+ Current.Tok.setKind(tok::unknown); + } else { Current.Type = TT_LineComment; - else - Current.Type = TT_BlockComment; + } } else if (Current.is(tok::r_paren)) { if (rParenEndsCast(Current)) Current.Type = TT_CastRParen; + if (Current.MatchingParen && Current.Next && + !Current.Next->isBinaryOperator() && + !Current.Next->isOneOf(tok::semi, tok::colon, tok::l_brace)) + if (FormatToken *BeforeParen = Current.MatchingParen->Previous) + if (BeforeParen->is(tok::identifier) && + BeforeParen->TokenText == BeforeParen->TokenText.upper() && + (!BeforeParen->Previous || + BeforeParen->Previous->ClosesTemplateDeclaration)) + Current.Type = TT_FunctionAnnotationRParen; } else if (Current.is(tok::at) && Current.Next) { - switch (Current.Next->Tok.getObjCKeywordID()) { - case tok::objc_interface: - case tok::objc_implementation: - case tok::objc_protocol: - Current.Type = TT_ObjCDecl; - break; - case tok::objc_property: - Current.Type = TT_ObjCProperty; - break; - default: - break; + if (Current.Next->isStringLiteral()) { + Current.Type = TT_ObjCStringLiteral; + } else { + switch (Current.Next->Tok.getObjCKeywordID()) { + case tok::objc_interface: + case tok::objc_implementation: + case tok::objc_protocol: + Current.Type = TT_ObjCDecl; + break; + case tok::objc_property: + Current.Type = TT_ObjCProperty; + break; + default: + break; + } } } else if (Current.is(tok::period)) { FormatToken *PreviousNoComment = Current.getPreviousNonComment(); @@ -875,7 +966,9 @@ private: // Line.MightBeFunctionDecl can only be true after the parentheses of a // function declaration have been found. Current.Type = TT_TrailingAnnotation; - } else if (Style.Language == FormatStyle::LK_Java && Current.Previous) { + } else if ((Style.Language == FormatStyle::LK_Java || + Style.Language == FormatStyle::LK_JavaScript) && + Current.Previous) { if (Current.Previous->is(tok::at) && Current.isNot(Keywords.kw_interface)) { const FormatToken &AtToken = *Current.Previous; @@ -902,7 +995,7 @@ private: return false; if (Tok.Previous->is(TT_LeadingJavaAnnotation)) - return false; + return false; // Skip "const" as it does not have an influence on whether this is a name. FormatToken *PreviousNotConst = Tok.Previous; @@ -964,8 +1057,7 @@ private: bool IsSizeOfOrAlignOf = LeftOfParens && LeftOfParens->isOneOf(tok::kw_sizeof, tok::kw_alignof); if (ParensAreType && !ParensCouldEndDecl && !IsSizeOfOrAlignOf && - ((Contexts.size() > 1 && Contexts[Contexts.size() - 2].IsExpression) || - (Tok.Next && Tok.Next->isBinaryOperator()))) + (Contexts.size() > 1 && Contexts[Contexts.size() - 2].IsExpression)) IsCast = true; else if (Tok.Next && Tok.Next->isNot(tok::string_literal) && (Tok.Next->Tok.isLiteral() || @@ -995,7 +1087,8 @@ private: } for (; Prev != Tok.MatchingParen; Prev = Prev->Previous) { - if (!Prev || !Prev->isOneOf(tok::kw_const, tok::identifier)) { + if (!Prev || + !Prev->isOneOf(tok::kw_const, tok::identifier, tok::coloncolon)) { IsCast = false; break; } @@ -1032,7 +1125,7 @@ private: if (NextToken->is(tok::l_square) && NextToken->isNot(TT_LambdaLSquare)) return TT_PointerOrReference; - if (NextToken->isOneOf(tok::kw_operator, tok::comma)) + if (NextToken->isOneOf(tok::kw_operator, tok::comma, tok::semi)) return TT_PointerOrReference; if (PrevToken->is(tok::r_paren) && PrevToken->MatchingParen && @@ -1108,10 +1201,16 @@ private: FormatToken *CurrentToken; bool AutoFound; const AdditionalKeywords &Keywords; + + // Set of "<" tokens that do not open a template parameter list. 
If parseAngle // determines that a specific token can't be a template opener, it will make the // same decision irrespective of the decisions for tokens leading up to it. // Store this information to prevent this from causing exponential runtime. + llvm::SmallPtrSet<FormatToken *, 16> NonTemplateLess; }; -static int PrecedenceUnaryOperator = prec::PointerToMember + 1; -static int PrecedenceArrowAndPeriod = prec::PointerToMember + 2; +static const int PrecedenceUnaryOperator = prec::PointerToMember + 1; +static const int PrecedenceArrowAndPeriod = prec::PointerToMember + 2; /// \brief Parses binary expressions by inserting fake parentheses based on /// operator precedence. @@ -1361,12 +1460,13 @@ static bool isFunctionDeclarationName(const FormatToken &Current) { assert(Next->is(tok::l_paren)); if (Next->Next == Next->MatchingParen) return true; - for (const FormatToken *Tok = Next->Next; Tok != Next->MatchingParen; + for (const FormatToken *Tok = Next->Next; Tok && Tok != Next->MatchingParen; Tok = Tok->Next) { if (Tok->is(tok::kw_const) || Tok->isSimpleTypeSpecifier() || Tok->isOneOf(TT_PointerOrReference, TT_StartOfName)) return true; - if (Tok->isOneOf(tok::l_brace, tok::string_literal) || Tok->Tok.isLiteral()) + if (Tok->isOneOf(tok::l_brace, tok::string_literal, TT_ObjCMethodExpr) || + Tok->Tok.isLiteral()) return false; } return false; @@ -1502,7 +1602,7 @@ unsigned TokenAnnotator::splitPenalty(const AnnotatedLine &Line, if (Left.is(tok::comma) && Left.NestingLevel == 0) return 3; } else if (Style.Language == FormatStyle::LK_JavaScript) { - if (Right.is(Keywords.kw_function)) + if (Right.is(Keywords.kw_function) && Left.isNot(tok::comma)) return 100; } @@ -1512,6 +1612,9 @@ unsigned TokenAnnotator::splitPenalty(const AnnotatedLine &Line, if (Right.is(tok::l_square)) { if (Style.Language == FormatStyle::LK_Proto) return 1; + // Slightly prefer formatting local lambda definitions like functions. 
+ if (Right.is(TT_LambdaLSquare) && Left.is(tok::equal)) + return 50; if (!Right.isOneOf(TT_ObjCMethodExpr, TT_LambdaLSquare)) return 500; } @@ -1521,11 +1624,15 @@ unsigned TokenAnnotator::splitPenalty(const AnnotatedLine &Line, if (Line.First->is(tok::kw_for) && Right.PartOfMultiVariableDeclStmt) return 3; if (Left.is(TT_StartOfName)) - return 20; + return 110; if (InFunctionDecl && Right.NestingLevel == 0) return Style.PenaltyReturnTypeOnItsOwnLine; return 200; } + if (Right.is(TT_PointerOrReference)) + return 190; + if (Right.is(TT_TrailingReturnArrow)) + return 110; if (Left.is(tok::equal) && Right.is(tok::l_brace)) return 150; if (Left.is(TT_CastRParen)) @@ -1575,6 +1682,8 @@ unsigned TokenAnnotator::splitPenalty(const AnnotatedLine &Line, if (Left.is(tok::l_paren) && InFunctionDecl && Style.AlignAfterOpenBracket) return 100; + if (Left.is(tok::l_paren) && Left.Previous && Left.Previous->is(tok::kw_if)) + return 1000; if (Left.is(tok::equal) && InFunctionDecl) return 110; if (Right.is(tok::r_brace)) @@ -1591,7 +1700,8 @@ unsigned TokenAnnotator::splitPenalty(const AnnotatedLine &Line, return 50; if (Right.is(tok::lessless)) { - if (Left.is(tok::string_literal)) { + if (Left.is(tok::string_literal) && + (!Right.LastOperator || Right.OperatorIndex != 1)) { StringRef Content = Left.TokenText; if (Content.startswith("\"")) Content = Content.drop_front(1); @@ -1607,7 +1717,9 @@ unsigned TokenAnnotator::splitPenalty(const AnnotatedLine &Line, if (Left.is(TT_ConditionalExpr)) return prec::Conditional; prec::Level Level = Left.getPrecedence(); - + if (Level != prec::Unknown) + return Level; + Level = Right.getPrecedence(); if (Level != prec::Unknown) return Level; @@ -1636,7 +1748,7 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line, if (Right.isOneOf(tok::semi, tok::comma)) return false; if (Right.is(tok::less) && - (Left.isOneOf(tok::kw_template, tok::r_paren) || + (Left.is(tok::kw_template) || (Line.Type == LT_ObjCDecl && Style.ObjCSpaceBeforeProtocolList))) return true; if (Left.isOneOf(tok::exclaim, tok::tilde)) @@ -1655,17 +1767,27 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line, if (Left.is(tok::l_square) && Right.is(tok::amp)) return false; if (Right.is(TT_PointerOrReference)) - return Left.Tok.isLiteral() || - (!Left.isOneOf(TT_PointerOrReference, tok::l_paren) && - Style.PointerAlignment != FormatStyle::PAS_Left); + return !(Left.is(tok::r_paren) && Left.MatchingParen && + (Left.MatchingParen->is(TT_OverloadedOperatorLParen) || + (Left.MatchingParen->Previous && + Left.MatchingParen->Previous->is( + TT_FunctionDeclarationName)))) && + (Left.Tok.isLiteral() || + (!Left.isOneOf(TT_PointerOrReference, tok::l_paren) && + (Style.PointerAlignment != FormatStyle::PAS_Left || + Line.IsMultiVariableDeclStmt))); if (Right.is(TT_FunctionTypeLParen) && Left.isNot(tok::l_paren) && (!Left.is(TT_PointerOrReference) || - Style.PointerAlignment != FormatStyle::PAS_Right)) + (Style.PointerAlignment != FormatStyle::PAS_Right && + !Line.IsMultiVariableDeclStmt))) return true; if (Left.is(TT_PointerOrReference)) return Right.Tok.isLiteral() || Right.is(TT_BlockComment) || - (!Right.isOneOf(TT_PointerOrReference, tok::l_paren) && - Style.PointerAlignment != FormatStyle::PAS_Right && Left.Previous && + (!Right.isOneOf(TT_PointerOrReference, TT_ArraySubscriptLSquare, + tok::l_paren) && + (Style.PointerAlignment != FormatStyle::PAS_Right && + !Line.IsMultiVariableDeclStmt) && + Left.Previous && !Left.Previous->isOneOf(tok::l_paren, tok::coloncolon)); if 
(Right.is(tok::star) && Left.is(tok::l_paren)) return false; @@ -1700,13 +1822,12 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line, return Line.Type == LT_ObjCDecl || Left.is(tok::semi) || (Style.SpaceBeforeParens != FormatStyle::SBPO_Never && (Left.isOneOf(tok::kw_if, tok::kw_for, tok::kw_while, - tok::kw_switch, tok::kw_case) || - (Left.isOneOf(tok::kw_try, tok::kw_catch, tok::kw_new, - tok::kw_delete) && - (!Left.Previous || Left.Previous->isNot(tok::period))) || - Left.IsForEachMacro)) || + tok::kw_switch, tok::kw_case, TT_ForEachMacro) || + (Left.isOneOf(tok::kw_try, Keywords.kw___except, tok::kw_catch, + tok::kw_new, tok::kw_delete) && + (!Left.Previous || Left.Previous->isNot(tok::period))))) || (Style.SpaceBeforeParens == FormatStyle::SBPO_Always && - (Left.is(tok::identifier) || Left.isFunctionLikeKeyword()) && + (Left.is(tok::identifier) || Left.isFunctionLikeKeyword() || Left.is(tok::r_paren)) && Line.Type != LT_PreprocessorDirective); } if (Left.is(tok::at) && Right.Tok.getObjCKeywordID() != tok::objc_not_keyword) @@ -1748,6 +1869,20 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line, } else if (Style.Language == FormatStyle::LK_JavaScript) { if (Left.is(Keywords.kw_var)) return true; + if (Right.isOneOf(TT_JsTypeColon, TT_JsTypeOptionalQuestion)) + return false; + if ((Left.is(tok::l_brace) || Right.is(tok::r_brace)) && + Line.First->isOneOf(Keywords.kw_import, tok::kw_export)) + return false; + if (Left.is(tok::ellipsis)) + return false; + if (Left.is(TT_TemplateCloser) && + !Right.isOneOf(tok::equal, tok::l_brace, tok::comma, tok::l_square, + Keywords.kw_implements, Keywords.kw_extends)) + // Type assertions ('<type>expr') are not followed by whitespace. Other + // locations that should have whitespace following are identified by the + // above set of follower tokens. + return false; } else if (Style.Language == FormatStyle::LK_Java) { if (Left.is(tok::r_square) && Right.is(tok::l_brace)) return true; @@ -1789,16 +1924,29 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line, return Right.is(tok::coloncolon); if (Right.is(TT_OverloadedOperatorLParen)) return false; - if (Right.is(tok::colon)) - return !Line.First->isOneOf(tok::kw_case, tok::kw_default) && - Right.getNextNonComment() && Right.isNot(TT_ObjCMethodExpr) && - !Left.is(tok::question) && - !(Right.is(TT_InlineASMColon) && Left.is(tok::coloncolon)) && - (Right.isNot(TT_DictLiteral) || Style.SpacesInContainerLiterals); + if (Right.is(tok::colon)) { + if (Line.First->isOneOf(tok::kw_case, tok::kw_default) || + !Right.getNextNonComment() || Right.getNextNonComment()->is(tok::semi)) + return false; + if (Right.is(TT_ObjCMethodExpr)) + return false; + if (Left.is(tok::question)) + return false; + if (Right.is(TT_InlineASMColon) && Left.is(tok::coloncolon)) + return false; + if (Right.is(TT_DictLiteral)) + return Style.SpacesInContainerLiterals; + return true; + } if (Left.is(TT_UnaryOperator)) return Right.is(TT_BinaryOperator); + + // If the next token is a binary operator or a selector name, we have + // incorrectly classified the parenthesis as a cast. FIXME: Detect correctly. 
if (Left.is(TT_CastRParen)) - return Style.SpaceAfterCStyleCast || Right.is(TT_BinaryOperator); + return Style.SpaceAfterCStyleCast || + Right.isOneOf(TT_BinaryOperator, TT_SelectorName); + if (Left.is(tok::greater) && Right.is(tok::greater)) { return Right.is(TT_TemplateCloser) && Left.is(TT_TemplateCloser) && (Style.Standard != FormatStyle::LS_Cpp11 || Style.SpacesInAngles); @@ -1819,7 +1967,8 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line, if ((Right.is(TT_BinaryOperator) && !Left.is(tok::l_paren)) || Left.isOneOf(TT_BinaryOperator, TT_ConditionalExpr)) return true; - if (Left.is(TT_TemplateCloser) && Right.is(tok::l_paren)) + if (Left.is(TT_TemplateCloser) && Right.is(tok::l_paren) && + Right.isNot(TT_FunctionTypeLParen)) return Style.SpaceBeforeParens == FormatStyle::SBPO_Always; if (Right.is(TT_TemplateOpener) && Left.is(tok::r_paren) && Left.MatchingParen && Left.MatchingParen->is(TT_OverloadedOperatorLParen)) @@ -1850,9 +1999,12 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line, // intention is to insert a line break after it in order to make shuffling // around entries easier. const FormatToken *BeforeClosingBrace = nullptr; - if (Left.is(tok::l_brace) && Left.BlockKind != BK_Block && Left.MatchingParen) + if (Left.isOneOf(tok::l_brace, TT_ArrayInitializerLSquare) && + Left.BlockKind != BK_Block && Left.MatchingParen) BeforeClosingBrace = Left.MatchingParen->Previous; - else if (Right.is(tok::r_brace) && Right.BlockKind != BK_Block) + else if (Right.MatchingParen && + Right.MatchingParen->isOneOf(tok::l_brace, + TT_ArrayInitializerLSquare)) BeforeClosingBrace = &Left; if (BeforeClosingBrace && (BeforeClosingBrace->is(tok::comma) || BeforeClosingBrace->isTrailingComment())) @@ -1862,8 +2014,10 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line, return Left.BlockKind != BK_BracedInit && Left.isNot(TT_CtorInitializerColon) && (Right.NewlinesBefore > 0 && Right.HasUnescapedNewline); - if (Right.Previous->isTrailingComment() || - (Right.isStringLiteral() && Right.Previous->isStringLiteral())) + if (Left.isTrailingComment()) + return true; + if (Left.isStringLiteral() && + (Right.isStringLiteral() || Right.is(TT_ObjCStringLiteral))) return true; if (Right.Previous->IsUnterminatedLiteral) return true; @@ -1889,6 +2043,8 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line, Style.Language == FormatStyle::LK_Proto) // Don't put enums onto single lines in protocol buffers. return true; + if (Right.is(TT_InlineASMBrace)) + return Right.HasUnescapedNewline; if (Style.Language == FormatStyle::LK_JavaScript && Right.is(tok::r_brace) && Left.is(tok::l_brace) && !Left.Children.empty()) // Support AllowShortFunctionsOnASingleLine for JavaScript. 
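// Sketch of the consecutive-string-literal rule above, written as an assumed
// FormatTest-style check (verifyFormat is the unit-test fixture helper; the
// exact case is illustrative): a literal directly followed by another one,
// including an ObjC @"..." literal, always keeps the break between them.
verifyFormat("NSString *s = @\"aaaaaaaaaaaaaaaa\"\n"
             "              @\"bbbbbbbbbbbbbbbb\";");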
@@ -1903,8 +2059,12 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line, return true; if (Left.is(TT_ObjCBlockLBrace) && !Style.AllowShortBlocksOnASingleLine) return true; - if (Right.is(tok::lessless) && Left.is(tok::identifier) && - Left.TokenText == "endl") + + if ((Style.Language == FormatStyle::LK_Java || + Style.Language == FormatStyle::LK_JavaScript) && + Left.is(TT_LeadingJavaAnnotation) && + Right.isNot(TT_LeadingJavaAnnotation) && Right.isNot(tok::l_paren) && + Line.Last->is(tok::l_brace)) return true; if (Style.Language == FormatStyle::LK_JavaScript) { @@ -1913,13 +2073,15 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line, Left.Previous->is(tok::char_constant)) return true; if (Left.is(TT_DictLiteral) && Left.is(tok::l_brace) && - Left.NestingLevel == 0) + Left.NestingLevel == 0 && Left.Previous && + Left.Previous->is(tok::equal) && + Line.First->isOneOf(tok::identifier, Keywords.kw_import, + tok::kw_export) && + // kw_var is a pseudo-token that's a tok::identifier, so matches above. + !Line.First->is(Keywords.kw_var)) + // Enum style object literal. return true; } else if (Style.Language == FormatStyle::LK_Java) { - if (Left.is(TT_LeadingJavaAnnotation) && - Right.isNot(TT_LeadingJavaAnnotation) && Right.isNot(tok::l_paren) && - Line.Last->is(tok::l_brace)) - return true; if (Right.is(tok::plus) && Left.is(tok::string_literal) && Right.Next && Right.Next->is(tok::string_literal)) return true; @@ -1947,9 +2109,15 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line, return false; if (Left.isOneOf(TT_JavaAnnotation, TT_LeadingJavaAnnotation)) return !Right.is(tok::l_paren); + if (Right.is(TT_PointerOrReference)) + return Line.IsMultiVariableDeclStmt || + (Style.PointerAlignment == FormatStyle::PAS_Right && + (!Right.Next || Right.Next->isNot(TT_FunctionDeclarationName))); if (Right.isOneOf(TT_StartOfName, TT_FunctionDeclarationName) || Right.is(tok::kw_operator)) return true; + if (Left.is(TT_PointerOrReference)) + return false; if (Right.isTrailingComment()) // We rely on MustBreakBefore being set correctly here as we should not // change the "binding" behavior of a comment. @@ -1970,8 +2138,9 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line, return false; if (Left.is(tok::colon) && (Left.isOneOf(TT_DictLiteral, TT_ObjCMethodExpr))) return true; - if (Right.is(TT_SelectorName)) - return true; + if (Right.is(TT_SelectorName) || (Right.is(tok::identifier) && Right.Next && + Right.Next->is(TT_ObjCMethodExpr))) + return Left.isNot(tok::period); // FIXME: Properly parse ObjC calls. 
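// Hedged sketch of the IsMultiVariableDeclStmt behavior used above, again as
// an assumed FormatTest-style check: with left pointer alignment, a DeclStmt
// declaring several variables keeps each '*' attached to its name, because
// "int* a, b;" would wrongly suggest that b is a pointer too.
FormatStyle LeftAligned = getLLVMStyle();
LeftAligned.PointerAlignment = FormatStyle::PAS_Left;
verifyFormat("int* a;", LeftAligned);     // single variable: left-aligned
verifyFormat("int *a, *b;", LeftAligned); // multiple variables: bind to names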
if (Left.is(tok::r_paren) && Line.Type == LT_ObjCProperty) return true; if (Left.ClosesTemplateDeclaration) @@ -1983,17 +2152,16 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line, return true; if (Right.is(TT_RangeBasedForLoopColon)) return false; - if (Left.isOneOf(TT_PointerOrReference, TT_TemplateCloser, - TT_UnaryOperator) || + if (Left.isOneOf(TT_TemplateCloser, TT_UnaryOperator) || Left.is(tok::kw_operator)) return false; - if (Left.is(tok::equal) && Line.Type == LT_VirtualFunctionDecl) + if (Left.is(tok::equal) && !Right.isOneOf(tok::kw_default, tok::kw_delete) && + Line.Type == LT_VirtualFunctionDecl) return false; if (Left.is(tok::l_paren) && Left.is(TT_AttributeParen)) return false; if (Left.is(tok::l_paren) && Left.Previous && - (Left.Previous->isOneOf(TT_BinaryOperator, TT_CastRParen) || - Left.Previous->is(tok::kw_if))) + (Left.Previous->isOneOf(TT_BinaryOperator, TT_CastRParen))) return false; if (Right.is(TT_ImplicitStringLiteral)) return false; @@ -2027,8 +2195,8 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line, if (Right.is(TT_CtorInitializerComma) && Style.BreakConstructorInitializersBeforeComma) return true; - if (Left.is(tok::greater) && Right.is(tok::greater) && - Left.isNot(TT_TemplateCloser)) + if ((Left.is(tok::greater) && Right.is(tok::greater)) || + (Left.is(tok::less) && Right.is(tok::less))) return false; if (Right.is(TT_BinaryOperator) && Style.BreakBeforeBinaryOperators != FormatStyle::BOS_None && @@ -2046,8 +2214,9 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line, return true; return Left.isOneOf(tok::comma, tok::coloncolon, tok::semi, tok::l_brace, tok::kw_class, tok::kw_struct) || - Right.isMemberAccess() || Right.is(TT_TrailingReturnArrow) || - Right.isOneOf(tok::lessless, tok::colon, tok::l_square, tok::at) || + Right.isMemberAccess() || + Right.isOneOf(TT_TrailingReturnArrow, TT_LambdaArrow, tok::lessless, + tok::colon, tok::l_square, tok::at) || (Left.is(tok::r_paren) && Right.isOneOf(tok::identifier, tok::kw_const)) || (Left.is(tok::l_paren) && !Right.is(tok::r_paren)); diff --git a/contrib/llvm/tools/clang/lib/Format/TokenAnnotator.h b/contrib/llvm/tools/clang/lib/Format/TokenAnnotator.h index ff8e32a..a948cdb 100644 --- a/contrib/llvm/tools/clang/lib/Format/TokenAnnotator.h +++ b/contrib/llvm/tools/clang/lib/Format/TokenAnnotator.h @@ -42,8 +42,8 @@ public: : First(Line.Tokens.front().Tok), Level(Line.Level), InPPDirective(Line.InPPDirective), MustBeDeclaration(Line.MustBeDeclaration), MightBeFunctionDecl(false), - Affected(false), LeadingEmptyLinesAffected(false), - ChildrenAffected(false) { + IsMultiVariableDeclStmt(false), Affected(false), + LeadingEmptyLinesAffected(false), ChildrenAffected(false) { assert(!Line.Tokens.empty()); // Calculate Next and Previous for all tokens. 
Note that we must overwrite @@ -59,11 +59,8 @@ public: I->Tok->Previous = Current; Current = Current->Next; Current->Children.clear(); - for (SmallVectorImpl<UnwrappedLine>::const_iterator - I = Node.Children.begin(), - E = Node.Children.end(); - I != E; ++I) { - Children.push_back(new AnnotatedLine(*I)); + for (const auto& Child : Node.Children) { + Children.push_back(new AnnotatedLine(Child)); Current->Children.push_back(Children.back()); } } @@ -75,6 +72,12 @@ public: for (unsigned i = 0, e = Children.size(); i != e; ++i) { delete Children[i]; } + FormatToken *Current = First; + while (Current) { + Current->Children.clear(); + Current->Role.reset(); + Current = Current->Next; + } } FormatToken *First; @@ -87,6 +90,7 @@ public: bool InPPDirective; bool MustBeDeclaration; bool MightBeFunctionDecl; + bool IsMultiVariableDeclStmt; /// \c True if this line should be formatted, i.e. intersects directly or /// indirectly with one of the input ranges. @@ -101,8 +105,8 @@ public: private: // Disallow copying. - AnnotatedLine(const AnnotatedLine &) LLVM_DELETED_FUNCTION; - void operator=(const AnnotatedLine &) LLVM_DELETED_FUNCTION; + AnnotatedLine(const AnnotatedLine &) = delete; + void operator=(const AnnotatedLine &) = delete; }; /// \brief Determines extra information about the tokens comprising an diff --git a/contrib/llvm/tools/clang/lib/Format/UnwrappedLineFormatter.cpp b/contrib/llvm/tools/clang/lib/Format/UnwrappedLineFormatter.cpp index ca66e73..cbf8c6c 100644 --- a/contrib/llvm/tools/clang/lib/Format/UnwrappedLineFormatter.cpp +++ b/contrib/llvm/tools/clang/lib/Format/UnwrappedLineFormatter.cpp @@ -25,19 +25,152 @@ bool startsExternCBlock(const AnnotatedLine &Line) { NextNext && NextNext->is(tok::l_brace); } +/// \brief Tracks the indent level of \c AnnotatedLines across levels. +/// +/// \c nextLine must be called for each \c AnnotatedLine, after which \c +/// getIndent() will return the indent for the last line \c nextLine was called +/// with. +/// If the line is not formatted (and thus the indent does not change), calling +/// \c adjustToUnmodifiedLine after the call to \c nextLine will cause +/// subsequent lines on the same level to be indented at the same level as the +/// given line. +class LevelIndentTracker { +public: + LevelIndentTracker(const FormatStyle &Style, + const AdditionalKeywords &Keywords, unsigned StartLevel, + int AdditionalIndent) + : Style(Style), Keywords(Keywords), AdditionalIndent(AdditionalIndent) { + for (unsigned i = 0; i != StartLevel; ++i) + IndentForLevel.push_back(Style.IndentWidth * i + AdditionalIndent); + } + + /// \brief Returns the indent for the current line. + unsigned getIndent() const { return Indent; } + + /// \brief Update the indent state given that \p Line is going to be formatted + /// next. + void nextLine(const AnnotatedLine &Line) { + Offset = getIndentOffset(*Line.First); + if (Line.InPPDirective) { + Indent = Line.Level * Style.IndentWidth + AdditionalIndent; + } else { + while (IndentForLevel.size() <= Line.Level) + IndentForLevel.push_back(-1); + IndentForLevel.resize(Line.Level + 1); + Indent = getIndent(IndentForLevel, Line.Level); + } + if (static_cast<int>(Indent) + Offset >= 0) + Indent += Offset; + } + + /// \brief Update the level indent to adapt to the given \p Line. + /// + /// When a line is not formatted, we move the subsequent lines on the same + /// level to the same indent. + /// Note that \c nextLine must have been called before this method. 
+ void adjustToUnmodifiedLine(const AnnotatedLine &Line) { + unsigned LevelIndent = Line.First->OriginalColumn; + if (static_cast<int>(LevelIndent) - Offset >= 0) + LevelIndent -= Offset; + if ((Line.First->isNot(tok::comment) || IndentForLevel[Line.Level] == -1) && + !Line.InPPDirective) + IndentForLevel[Line.Level] = LevelIndent; + } + +private: + /// \brief Get the offset of the line relative to the level. + /// + /// For example, 'public:' labels in classes are offset by 1 or 2 + /// characters to the left from their level. + int getIndentOffset(const FormatToken &RootToken) { + if (Style.Language == FormatStyle::LK_Java || + Style.Language == FormatStyle::LK_JavaScript) + return 0; + if (RootToken.isAccessSpecifier(false) || + RootToken.isObjCAccessSpecifier() || + (RootToken.is(Keywords.kw_signals) && RootToken.Next && + RootToken.Next->is(tok::colon))) + return Style.AccessModifierOffset; + return 0; + } + + /// \brief Get the indent of \p Level from \p IndentForLevel. + /// + /// \p IndentForLevel must contain the indent for the level \c l + /// at \p IndentForLevel[l], or a value < 0 if the indent for + /// that level is unknown. + unsigned getIndent(ArrayRef<int> IndentForLevel, unsigned Level) { + if (IndentForLevel[Level] != -1) + return IndentForLevel[Level]; + if (Level == 0) + return 0; + return getIndent(IndentForLevel, Level - 1) + Style.IndentWidth; + } + + const FormatStyle &Style; + const AdditionalKeywords &Keywords; + const unsigned AdditionalIndent; + + /// \brief The indent in characters for each level. + std::vector<int> IndentForLevel; + + /// \brief Offset of the current line relative to the indent level. + /// + /// For example, the 'public' keyword is often indented with a negative + /// offset. + int Offset = 0; + + /// \brief The current line's indent. + unsigned Indent = 0; +}; + class LineJoiner { public: - LineJoiner(const FormatStyle &Style) : Style(Style) {} + LineJoiner(const FormatStyle &Style, const AdditionalKeywords &Keywords, + const SmallVectorImpl<AnnotatedLine *> &Lines) + : Style(Style), Keywords(Keywords), End(Lines.end()), + Next(Lines.begin()) {} + + /// \brief Returns the next line, merging multiple lines into one if possible. + const AnnotatedLine *getNextMergedLine(bool DryRun, + LevelIndentTracker &IndentTracker) { + if (Next == End) + return nullptr; + const AnnotatedLine *Current = *Next; + IndentTracker.nextLine(*Current); + unsigned MergedLines = + tryFitMultipleLinesInOne(IndentTracker.getIndent(), Next, End); + if (MergedLines > 0 && Style.ColumnLimit == 0) + // Disallow line merging if there is a break at the start of one of the + // input lines. + for (unsigned i = 0; i < MergedLines; ++i) + if (Next[i + 1]->First->NewlinesBefore > 0) + MergedLines = 0; + if (!DryRun) + for (unsigned i = 0; i < MergedLines; ++i) + join(*Next[i], *Next[i + 1]); + Next = Next + MergedLines + 1; + return Current; + } +private: /// \brief Calculates how many lines can be merged into 1 starting at \p I. unsigned tryFitMultipleLinesInOne(unsigned Indent, SmallVectorImpl<AnnotatedLine *>::const_iterator I, SmallVectorImpl<AnnotatedLine *>::const_iterator E) { + // Can't join the last line with anything. + if (I + 1 == E) + return 0; // We can never merge stuff if there are trailing line comments. 
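// A minimal usage sketch, assuming the surrounding UnwrappedLineFormatter
// loop (not shown in this hunk), of how the two helpers above cooperate;
// the loop body is abbreviated:
LevelIndentTracker IndentTracker(Style, Keywords, /*StartLevel=*/0,
                                 /*AdditionalIndent=*/0);
LineJoiner Joiner(Style, Keywords, AnnotatedLines);
while (const AnnotatedLine *Line =
           Joiner.getNextMergedLine(/*DryRun=*/false, IndentTracker)) {
  unsigned Indent = IndentTracker.getIndent();
  // ...format *Line at Indent; if the line is left unformatted, call
  // IndentTracker.adjustToUnmodifiedLine(*Line) so that subsequent lines at
  // the same level keep the user's original column.
  (void)Indent;
}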
const AnnotatedLine *TheLine = *I; if (TheLine->Last->is(TT_LineComment)) return 0; + if (I[1]->Type == LT_Invalid || I[1]->First->MustBreakBefore) + return 0; + if (TheLine->InPPDirective && + (!I[1]->InPPDirective || I[1]->First->HasUnescapedNewline)) + return 0; if (Style.ColumnLimit > 0 && Indent > Style.ColumnLimit) return 0; @@ -50,9 +183,6 @@ public: ? 0 : Limit - TheLine->Last->TotalLength; - if (I + 1 == E || I[1]->Type == LT_Invalid || I[1]->First->MustBreakBefore) - return 0; - // FIXME: TheLine->Level != 0 might or might not be the right check to do. // If necessary, change to something smarter. bool MergeShortFunctions = @@ -113,15 +243,12 @@ public: return 0; } -private: unsigned tryMergeSimplePPDirective(SmallVectorImpl<AnnotatedLine *>::const_iterator I, SmallVectorImpl<AnnotatedLine *>::const_iterator E, unsigned Limit) { if (Limit == 0) return 0; - if (!I[1]->InPPDirective || I[1]->First->HasUnescapedNewline) - return 0; if (I + 2 != E && I[2]->InPPDirective && !I[2]->First->HasUnescapedNewline) return 0; if (1 + I[1]->Last->TotalLength > Limit) @@ -147,8 +274,8 @@ private: return 0; if (1 + I[1]->Last->TotalLength > Limit) return 0; - if (I[1]->First->isOneOf(tok::semi, tok::kw_if, tok::kw_for, - tok::kw_while, TT_LineComment)) + if (I[1]->First->isOneOf(tok::semi, tok::kw_if, tok::kw_for, tok::kw_while, + TT_LineComment)) return 0; // Only inline simple if's (no nested if or else). if (I + 2 != E && Line.First->is(tok::kw_if) && @@ -157,9 +284,10 @@ private: return 1; } - unsigned tryMergeShortCaseLabels( - SmallVectorImpl<AnnotatedLine *>::const_iterator I, - SmallVectorImpl<AnnotatedLine *>::const_iterator E, unsigned Limit) { + unsigned + tryMergeShortCaseLabels(SmallVectorImpl<AnnotatedLine *>::const_iterator I, + SmallVectorImpl<AnnotatedLine *>::const_iterator E, + unsigned Limit) { if (Limit == 0 || I + 1 == E || I[1]->First->isOneOf(tok::kw_case, tok::kw_default)) return 0; @@ -191,16 +319,21 @@ private: AnnotatedLine &Line = **I; // Don't merge ObjC @ keywords and methods. + // FIXME: If an option to allow short exception handling clauses on a single + // line is added, change this to not return for @try and friends. if (Style.Language != FormatStyle::LK_Java && Line.First->isOneOf(tok::at, tok::minus, tok::plus)) return 0; // Check that the current line allows merging. This depends on whether we // are in a control flow statements as well as several style flags. - if (Line.First->isOneOf(tok::kw_else, tok::kw_case)) + if (Line.First->isOneOf(tok::kw_else, tok::kw_case) || + (Line.First->Next && Line.First->Next->is(tok::kw_else))) return 0; if (Line.First->isOneOf(tok::kw_if, tok::kw_while, tok::kw_do, tok::kw_try, - tok::kw_catch, tok::kw_for, tok::r_brace)) { + tok::kw___try, tok::kw_catch, tok::kw___finally, + tok::kw_for, tok::r_brace) || + Line.First->is(Keywords.kw___except)) { if (!Style.AllowShortBlocksOnASingleLine) return 0; if (!Style.AllowShortIfStatementsOnASingleLine && @@ -211,7 +344,11 @@ private: return 0; // FIXME: Consider an option to allow short exception handling clauses on // a single line. - if (Line.First->isOneOf(tok::kw_try, tok::kw_catch)) + // FIXME: This isn't covered by tests. + // FIXME: For catch, __except, __finally the first token on the line + // is '}', so this isn't correct here. 
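tryFitMultipleLinesInOne() and the tryMerge* helpers above all reduce to the same fitness question: does the second line, plus one joining space, still fit into the columns left after indentation? A rough model of that check (ours; the real code works on token TotalLength fields rather than strings):

  #include <string>

  bool fitsWhenMerged(const std::string &FirstLine,
                      const std::string &SecondLine, unsigned Indent,
                      unsigned ColumnLimit) {
    if (ColumnLimit == 0)
      return true;   // No limit configured; length never blocks merging.
    if (Indent > ColumnLimit)
      return false;  // Already past the limit before any content.
    unsigned Limit = ColumnLimit - Indent;
    // One extra column for the space that joins the two lines.
    return FirstLine.size() + 1 + SecondLine.size() <= Limit;
  }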
+ if (Line.First->isOneOf(tok::kw_try, tok::kw___try, tok::kw_catch, + Keywords.kw___except, tok::kw___finally)) return 0; } @@ -226,7 +363,8 @@ private: } else if (Limit != 0 && Line.First->isNot(tok::kw_namespace) && !startsExternCBlock(Line)) { // We don't merge short records. - if (Line.First->isOneOf(tok::kw_class, tok::kw_union, tok::kw_struct)) + if (Line.First->isOneOf(tok::kw_class, tok::kw_union, tok::kw_struct, + Keywords.kw_interface)) return 0; // Check that we still have three lines and they fit into the limit. @@ -252,6 +390,10 @@ private: if (Tok->isNot(tok::r_brace)) return 0; + // Don't merge "if (a) { .. } else {". + if (Tok->Next && Tok->Next->is(tok::kw_else)) + return 0; + return 2; } return 0; @@ -285,28 +427,367 @@ private: return false; } + void join(AnnotatedLine &A, const AnnotatedLine &B) { + assert(!A.Last->Next); + assert(!B.First->Previous); + if (B.Affected) + A.Affected = true; + A.Last->Next = B.First; + B.First->Previous = A.Last; + B.First->CanBreakBefore = true; + unsigned LengthA = A.Last->TotalLength + B.First->SpacesRequiredBefore; + for (FormatToken *Tok = B.First; Tok; Tok = Tok->Next) { + Tok->TotalLength += LengthA; + A.Last = Tok; + } + } + const FormatStyle &Style; + const AdditionalKeywords &Keywords; + const SmallVectorImpl<AnnotatedLine*>::const_iterator End; + + SmallVectorImpl<AnnotatedLine*>::const_iterator Next; }; -class NoColumnLimitFormatter { +static void markFinalized(FormatToken *Tok) { + for (; Tok; Tok = Tok->Next) { + Tok->Finalized = true; + for (AnnotatedLine *Child : Tok->Children) + markFinalized(Child->First); + } +} + +#ifndef NDEBUG +static void printLineState(const LineState &State) { + llvm::dbgs() << "State: "; + for (const ParenState &P : State.Stack) { + llvm::dbgs() << P.Indent << "|" << P.LastSpace << "|" << P.NestedBlockIndent + << " "; + } + llvm::dbgs() << State.NextToken->TokenText << "\n"; +} +#endif + +/// \brief Base class for classes that format one \c AnnotatedLine. +class LineFormatter { public: - NoColumnLimitFormatter(ContinuationIndenter *Indenter) : Indenter(Indenter) {} + LineFormatter(ContinuationIndenter *Indenter, WhitespaceManager *Whitespaces, + const FormatStyle &Style, + UnwrappedLineFormatter *BlockFormatter) + : Indenter(Indenter), Whitespaces(Whitespaces), Style(Style), + BlockFormatter(BlockFormatter) {} + virtual ~LineFormatter() {} + + /// \brief Formats an \c AnnotatedLine and returns the penalty. + /// + /// If \p DryRun is \c false, directly applies the changes. + virtual unsigned formatLine(const AnnotatedLine &Line, unsigned FirstIndent, + bool DryRun) = 0; + +protected: + /// \brief If the \p State's next token is an r_brace closing a nested block, + /// format the nested block before it. + /// + /// Returns \c true if all children could be placed successfully and adapts + /// \p Penalty as well as \p State. If \p DryRun is false, also directly + /// creates changes using \c Whitespaces. + /// + /// The crucial idea here is that children always get formatted upon + /// encountering the closing brace right after the nested block. Now, if we + /// are currently trying to keep the "}" on the same line (i.e. \p NewLine is + /// \c false), the entire block has to be kept on the same line (which is only + /// possible if it fits on the line, only contains a single statement, etc. + /// + /// If \p NewLine is true, we format the nested block on separate lines, i.e. 
+ /// break after the "{", format all lines with correct indentation and the put + /// the closing "}" on yet another new line. + /// + /// This enables us to keep the simple structure of the + /// \c UnwrappedLineFormatter, where we only have two options for each token: + /// break or don't break. + bool formatChildren(LineState &State, bool NewLine, bool DryRun, + unsigned &Penalty) { + const FormatToken *LBrace = State.NextToken->getPreviousNonComment(); + FormatToken &Previous = *State.NextToken->Previous; + if (!LBrace || LBrace->isNot(tok::l_brace) || + LBrace->BlockKind != BK_Block || Previous.Children.size() == 0) + // The previous token does not open a block. Nothing to do. We don't + // assert so that we can simply call this function for all tokens. + return true; + + if (NewLine) { + int AdditionalIndent = State.Stack.back().Indent - + Previous.Children[0]->Level * Style.IndentWidth; + + Penalty += + BlockFormatter->format(Previous.Children, DryRun, AdditionalIndent, + /*FixBadIndentation=*/true); + return true; + } + + if (Previous.Children[0]->First->MustBreakBefore) + return false; + + // Cannot merge multiple statements into a single line. + if (Previous.Children.size() > 1) + return false; + + // Cannot merge into one line if this line ends on a comment. + if (Previous.is(tok::comment)) + return false; + + // We can't put the closing "}" on a line with a trailing comment. + if (Previous.Children[0]->Last->isTrailingComment()) + return false; + + // If the child line exceeds the column limit, we wouldn't want to merge it. + // We add +2 for the trailing " }". + if (Style.ColumnLimit > 0 && + Previous.Children[0]->Last->TotalLength + State.Column + 2 > + Style.ColumnLimit) + return false; + + if (!DryRun) { + Whitespaces->replaceWhitespace( + *Previous.Children[0]->First, + /*Newlines=*/0, /*IndentLevel=*/0, /*Spaces=*/1, + /*StartOfTokenColumn=*/State.Column, State.Line->InPPDirective); + } + Penalty += formatLine(*Previous.Children[0], State.Column + 1, DryRun); + + State.Column += 1 + Previous.Children[0]->Last->TotalLength; + return true; + } + + ContinuationIndenter *Indenter; + +private: + WhitespaceManager *Whitespaces; + const FormatStyle &Style; + UnwrappedLineFormatter *BlockFormatter; +}; - /// \brief Formats the line starting at \p State, simply keeping all of the - /// input's line breaking decisions. - void format(unsigned FirstIndent, const AnnotatedLine *Line) { +/// \brief Formatter that keeps the existing line breaks. +class NoColumnLimitLineFormatter : public LineFormatter { +public: + NoColumnLimitLineFormatter(ContinuationIndenter *Indenter, + WhitespaceManager *Whitespaces, + const FormatStyle &Style, + UnwrappedLineFormatter *BlockFormatter) + : LineFormatter(Indenter, Whitespaces, Style, BlockFormatter) {} + + /// \brief Formats the line, simply keeping all of the input's line breaking + /// decisions. + unsigned formatLine(const AnnotatedLine &Line, unsigned FirstIndent, + bool DryRun) override { + assert(!DryRun); LineState State = - Indenter->getInitialState(FirstIndent, Line, /*DryRun=*/false); + Indenter->getInitialState(FirstIndent, &Line, /*DryRun=*/false); while (State.NextToken) { bool Newline = Indenter->mustBreak(State) || (Indenter->canBreak(State) && State.NextToken->NewlinesBefore > 0); + unsigned Penalty = 0; + formatChildren(State, Newline, /*DryRun=*/false, Penalty); Indenter->addTokenToState(State, Newline, /*DryRun=*/false); } + return 0; + } +}; + +/// \brief Formatter that puts all tokens into a single line without breaks. 
+class NoLineBreakFormatter : public LineFormatter { +public: + NoLineBreakFormatter(ContinuationIndenter *Indenter, + WhitespaceManager *Whitespaces, const FormatStyle &Style, + UnwrappedLineFormatter *BlockFormatter) + : LineFormatter(Indenter, Whitespaces, Style, BlockFormatter) {} + + /// \brief Puts all tokens into a single line. + unsigned formatLine(const AnnotatedLine &Line, unsigned FirstIndent, + bool DryRun) { + unsigned Penalty = 0; + LineState State = Indenter->getInitialState(FirstIndent, &Line, DryRun); + while (State.NextToken) { + formatChildren(State, /*Newline=*/false, DryRun, Penalty); + Indenter->addTokenToState(State, /*Newline=*/false, DryRun); + } + return Penalty; + } +}; + +/// \brief Finds the best way to break lines. +class OptimizingLineFormatter : public LineFormatter { +public: + OptimizingLineFormatter(ContinuationIndenter *Indenter, + WhitespaceManager *Whitespaces, + const FormatStyle &Style, + UnwrappedLineFormatter *BlockFormatter) + : LineFormatter(Indenter, Whitespaces, Style, BlockFormatter) {} + + /// \brief Formats the line by finding the best line breaks with line lengths + /// below the column limit. + unsigned formatLine(const AnnotatedLine &Line, unsigned FirstIndent, + bool DryRun) { + LineState State = Indenter->getInitialState(FirstIndent, &Line, DryRun); + + // If the ObjC method declaration does not fit on a line, we should format + // it with one arg per line. + if (State.Line->Type == LT_ObjCMethodDecl) + State.Stack.back().BreakBeforeParameter = true; + + // Find best solution in solution space. + return analyzeSolutionSpace(State, DryRun); } private: - ContinuationIndenter *Indenter; + struct CompareLineStatePointers { + bool operator()(LineState *obj1, LineState *obj2) const { + return *obj1 < *obj2; + } + }; + + /// \brief A pair of <penalty, count> that is used to prioritize the BFS on. + /// + /// In case of equal penalties, we want to prefer states that were inserted + /// first. During state generation we make sure that we insert states first + /// that break the line as late as possible. + typedef std::pair<unsigned, unsigned> OrderedPenalty; + + /// \brief An edge in the solution space from \c Previous->State to \c State, + /// inserting a newline dependent on the \c NewLine. + struct StateNode { + StateNode(const LineState &State, bool NewLine, StateNode *Previous) + : State(State), NewLine(NewLine), Previous(Previous) {} + LineState State; + bool NewLine; + StateNode *Previous; + }; + + /// \brief An item in the prioritized BFS search queue. The \c StateNode's + /// \c State has the given \c OrderedPenalty. + typedef std::pair<OrderedPenalty, StateNode *> QueueItem; + + /// \brief The BFS queue type. + typedef std::priority_queue<QueueItem, std::vector<QueueItem>, + std::greater<QueueItem>> QueueType; + + /// \brief Analyze the entire solution space starting from \p InitialState. + /// + /// This implements a variant of Dijkstra's algorithm on the graph that spans + /// the solution space (\c LineStates are the nodes). The algorithm tries to + /// find the shortest path (the one with lowest penalty) from \p InitialState + /// to a state where all tokens are placed. Returns the penalty. + /// + /// If \p DryRun is \c false, directly applies the changes. + unsigned analyzeSolutionSpace(LineState &InitialState, bool DryRun) { + std::set<LineState *, CompareLineStatePointers> Seen; + + // Increasing count of \c StateNode items we have created. This is used to + // create a deterministic order independent of the container. 
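The OrderedPenalty pair above is what makes the search deterministic: std::greater on QueueItem turns the priority_queue into a min-heap ordered first by penalty and then by insertion count, so equal-penalty states pop in the order they were enqueued. A self-contained illustration (using a plain int in place of StateNode*):

  #include <functional>
  #include <queue>
  #include <utility>
  #include <vector>

  typedef std::pair<unsigned, unsigned> OrderedPenalty; // <penalty, count>
  typedef std::pair<OrderedPenalty, int> QueueItem;     // int stands in for StateNode*

  int main() {
    std::priority_queue<QueueItem, std::vector<QueueItem>,
                        std::greater<QueueItem>> Queue;
    unsigned Count = 0;
    Queue.push({{10, Count++}, 1});
    Queue.push({{10, Count++}, 2});
    Queue.push({{5, Count++}, 3});
    // Pops item 3 first (lowest penalty), then 1 before 2 (earlier count).
    return Queue.top().second; // == 3
  }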
+ unsigned Count = 0; + QueueType Queue; + + // Insert start element into queue. + StateNode *Node = + new (Allocator.Allocate()) StateNode(InitialState, false, nullptr); + Queue.push(QueueItem(OrderedPenalty(0, Count), Node)); + ++Count; + + unsigned Penalty = 0; + + // While not empty, take first element and follow edges. + while (!Queue.empty()) { + Penalty = Queue.top().first.first; + StateNode *Node = Queue.top().second; + if (!Node->State.NextToken) { + DEBUG(llvm::dbgs() << "\n---\nPenalty for line: " << Penalty << "\n"); + break; + } + Queue.pop(); + + // Cut off the analysis of certain solutions if the analysis gets too + // complex. See description of IgnoreStackForComparison. + if (Count > 10000) + Node->State.IgnoreStackForComparison = true; + + if (!Seen.insert(&Node->State).second) + // State already examined with lower penalty. + continue; + + FormatDecision LastFormat = Node->State.NextToken->Decision; + if (LastFormat == FD_Unformatted || LastFormat == FD_Continue) + addNextStateToQueue(Penalty, Node, /*NewLine=*/false, &Count, &Queue); + if (LastFormat == FD_Unformatted || LastFormat == FD_Break) + addNextStateToQueue(Penalty, Node, /*NewLine=*/true, &Count, &Queue); + } + + if (Queue.empty()) { + // We were unable to find a solution, do nothing. + // FIXME: Add diagnostic? + DEBUG(llvm::dbgs() << "Could not find a solution.\n"); + return 0; + } + + // Reconstruct the solution. + if (!DryRun) + reconstructPath(InitialState, Queue.top().second); + + DEBUG(llvm::dbgs() << "Total number of analyzed states: " << Count << "\n"); + DEBUG(llvm::dbgs() << "---\n"); + + return Penalty; + } + + /// \brief Add the following state to the analysis queue \c Queue. + /// + /// Assume the current state is \p PreviousNode and has been reached with a + /// penalty of \p Penalty. Insert a line break if \p NewLine is \c true. + void addNextStateToQueue(unsigned Penalty, StateNode *PreviousNode, + bool NewLine, unsigned *Count, QueueType *Queue) { + if (NewLine && !Indenter->canBreak(PreviousNode->State)) + return; + if (!NewLine && Indenter->mustBreak(PreviousNode->State)) + return; + + StateNode *Node = new (Allocator.Allocate()) + StateNode(PreviousNode->State, NewLine, PreviousNode); + if (!formatChildren(Node->State, NewLine, /*DryRun=*/true, Penalty)) + return; + + Penalty += Indenter->addTokenToState(Node->State, NewLine, true); + + Queue->push(QueueItem(OrderedPenalty(Penalty, *Count), Node)); + ++(*Count); + } + + /// \brief Applies the best formatting by reconstructing the path in the + /// solution space that leads to \c Best. + void reconstructPath(LineState &State, StateNode *Best) { + std::deque<StateNode *> Path; + // We do not need a break before the initial token. 
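The Seen set above prunes states that were already reached with a lower penalty; CompareLineStatePointers is needed because the set stores pointers while duplicates must be detected by comparing the pointed-to states. The same pattern in isolation, with a toy State type standing in for LineState:

  #include <set>

  struct State {
    int Column; // Toy stand-in for the real LineState contents.
    bool operator<(const State &O) const { return Column < O.Column; }
  };

  // Order by pointee, not by pointer address.
  struct CompareStatePointers {
    bool operator()(const State *A, const State *B) const { return *A < *B; }
  };

  // insert().second is false when an equivalent state already exists,
  // which is exactly the "already examined with lower penalty" cut-off.
  bool tryVisit(std::set<const State *, CompareStatePointers> &Seen,
                const State *S) {
    return Seen.insert(S).second;
  }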
+ while (Best->Previous) { + Path.push_front(Best); + Best = Best->Previous; + } + for (std::deque<StateNode *>::iterator I = Path.begin(), E = Path.end(); + I != E; ++I) { + unsigned Penalty = 0; + formatChildren(State, (*I)->NewLine, /*DryRun=*/false, Penalty); + Penalty += Indenter->addTokenToState(State, (*I)->NewLine, false); + + DEBUG({ + printLineState((*I)->Previous->State); + if ((*I)->NewLine) { + llvm::dbgs() << "Penalty for placing " + << (*I)->Previous->State.NextToken->Tok.getName() << ": " + << Penalty << "\n"; + } + }); + } + } + + llvm::SpecificBumpPtrAllocator<StateNode> Allocator; }; } // namespace @@ -315,7 +796,7 @@ unsigned UnwrappedLineFormatter::format(const SmallVectorImpl<AnnotatedLine *> &Lines, bool DryRun, int AdditionalIndent, bool FixBadIndentation) { - LineJoiner Joiner(Style); + LineJoiner Joiner(Style, Keywords, Lines); // Try to look up already computed penalty in DryRun-mode. std::pair<const SmallVectorImpl<AnnotatedLine *> *, unsigned> CacheKey( @@ -326,151 +807,93 @@ UnwrappedLineFormatter::format(const SmallVectorImpl<AnnotatedLine *> &Lines, assert(!Lines.empty()); unsigned Penalty = 0; - std::vector<int> IndentForLevel; - for (unsigned i = 0, e = Lines[0]->Level; i != e; ++i) - IndentForLevel.push_back(Style.IndentWidth * i + AdditionalIndent); + LevelIndentTracker IndentTracker(Style, Keywords, Lines[0]->Level, + AdditionalIndent); const AnnotatedLine *PreviousLine = nullptr; - for (SmallVectorImpl<AnnotatedLine *>::const_iterator I = Lines.begin(), - E = Lines.end(); - I != E; ++I) { - const AnnotatedLine &TheLine = **I; - const FormatToken *FirstTok = TheLine.First; - int Offset = getIndentOffset(*FirstTok); - - // Determine indent and try to merge multiple unwrapped lines. - unsigned Indent; - if (TheLine.InPPDirective) { - Indent = TheLine.Level * Style.IndentWidth; - } else { - while (IndentForLevel.size() <= TheLine.Level) - IndentForLevel.push_back(-1); - IndentForLevel.resize(TheLine.Level + 1); - Indent = getIndent(IndentForLevel, TheLine.Level); - } - unsigned LevelIndent = Indent; - if (static_cast<int>(Indent) + Offset >= 0) - Indent += Offset; - - // Merge multiple lines if possible. - unsigned MergedLines = Joiner.tryFitMultipleLinesInOne(Indent, I, E); - if (MergedLines > 0 && Style.ColumnLimit == 0) { - // Disallow line merging if there is a break at the start of one of the - // input lines. - for (unsigned i = 0; i < MergedLines; ++i) { - if (I[i + 1]->First->NewlinesBefore > 0) - MergedLines = 0; - } - } - if (!DryRun) { - for (unsigned i = 0; i < MergedLines; ++i) { - join(*I[i], *I[i + 1]); - } - } - I += MergedLines; - + const AnnotatedLine *NextLine = nullptr; + for (const AnnotatedLine *Line = + Joiner.getNextMergedLine(DryRun, IndentTracker); + Line; Line = NextLine) { + const AnnotatedLine &TheLine = *Line; + unsigned Indent = IndentTracker.getIndent(); bool FixIndentation = - FixBadIndentation && (LevelIndent != FirstTok->OriginalColumn); - if (TheLine.First->is(tok::eof)) { - if (PreviousLine && PreviousLine->Affected && !DryRun) { - // Remove the file's trailing whitespace. 
- unsigned Newlines = std::min(FirstTok->NewlinesBefore, 1u); - Whitespaces->replaceWhitespace(*TheLine.First, Newlines, - /*IndentLevel=*/0, /*Spaces=*/0, - /*TargetColumn=*/0); - } - } else if (TheLine.Type != LT_Invalid && - (TheLine.Affected || FixIndentation)) { - if (FirstTok->WhitespaceRange.isValid()) { - if (!DryRun) - formatFirstToken(*TheLine.First, PreviousLine, TheLine.Level, Indent, + FixBadIndentation && (Indent != TheLine.First->OriginalColumn); + bool ShouldFormat = TheLine.Affected || FixIndentation; + // We cannot format this line; if the reason is that the line had a + // parsing error, remember that. + if (ShouldFormat && TheLine.Type == LT_Invalid && IncompleteFormat) + *IncompleteFormat = true; + + if (ShouldFormat && TheLine.Type != LT_Invalid) { + if (!DryRun) + formatFirstToken(*TheLine.First, PreviousLine, TheLine.Level, Indent, + TheLine.InPPDirective); + + NextLine = Joiner.getNextMergedLine(DryRun, IndentTracker); + unsigned ColumnLimit = getColumnLimit(TheLine.InPPDirective, NextLine); + bool FitsIntoOneLine = + TheLine.Last->TotalLength + Indent <= ColumnLimit || + TheLine.Type == LT_ImportStatement; + + if (Style.ColumnLimit == 0) + NoColumnLimitLineFormatter(Indenter, Whitespaces, Style, this) + .formatLine(TheLine, Indent, DryRun); + else if (FitsIntoOneLine) + Penalty += NoLineBreakFormatter(Indenter, Whitespaces, Style, this) + .formatLine(TheLine, Indent, DryRun); + else + Penalty += OptimizingLineFormatter(Indenter, Whitespaces, Style, this) + .formatLine(TheLine, Indent, DryRun); + } else { + // If no token in the current line is affected, we still need to format + // affected children. + if (TheLine.ChildrenAffected) + format(TheLine.Children, DryRun); + + // Adapt following lines on the current indent level to the same level + // unless the current \c AnnotatedLine is not at the beginning of a line. + bool StartsNewLine = + TheLine.First->NewlinesBefore > 0 || TheLine.First->IsFirst; + if (StartsNewLine) + IndentTracker.adjustToUnmodifiedLine(TheLine); + if (!DryRun) { + bool ReformatLeadingWhitespace = + StartsNewLine && ((PreviousLine && PreviousLine->Affected) || + TheLine.LeadingEmptyLinesAffected); + // Format the first token. + if (ReformatLeadingWhitespace) + formatFirstToken(*TheLine.First, PreviousLine, TheLine.Level, + TheLine.First->OriginalColumn, TheLine.InPPDirective); - } else { - Indent = LevelIndent = FirstTok->OriginalColumn; - } - - // If everything fits on a single line, just put it there. - unsigned ColumnLimit = Style.ColumnLimit; - if (I + 1 != E) { - AnnotatedLine *NextLine = I[1]; - if (NextLine->InPPDirective && !NextLine->First->HasUnescapedNewline) - ColumnLimit = getColumnLimit(TheLine.InPPDirective); - } + else + Whitespaces->addUntouchableToken(*TheLine.First, + TheLine.InPPDirective); - if (TheLine.Last->TotalLength + Indent <= ColumnLimit || - TheLine.Type == LT_ImportStatement) { - LineState State = Indenter->getInitialState(Indent, &TheLine, DryRun); - while (State.NextToken) { - formatChildren(State, /*Newline=*/false, /*DryRun=*/false, Penalty); - Indenter->addTokenToState(State, /*Newline=*/false, DryRun); - } - } else if (Style.ColumnLimit == 0) { - // FIXME: Implement nested blocks for ColumnLimit = 0. 
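The rewritten loop above dispatches each line to one of the three LineFormatter subclasses. Condensed into a decision function (names ours, and the real code derives the limit via getColumnLimit()), the policy is:

  enum class Strategy { KeepBreaks, SingleLine, Optimize };

  Strategy pickStrategy(unsigned ColumnLimit, unsigned LineLength,
                        unsigned Indent, bool IsImportStatement) {
    if (ColumnLimit == 0)
      return Strategy::KeepBreaks;   // NoColumnLimitLineFormatter
    if (LineLength + Indent <= ColumnLimit || IsImportStatement)
      return Strategy::SingleLine;   // NoLineBreakFormatter
    return Strategy::Optimize;       // OptimizingLineFormatter
  }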
- NoColumnLimitFormatter Formatter(Indenter); - if (!DryRun) - Formatter.format(Indent, &TheLine); - } else { - Penalty += format(TheLine, Indent, DryRun); - } - - if (!TheLine.InPPDirective) - IndentForLevel[TheLine.Level] = LevelIndent; - } else if (TheLine.ChildrenAffected) { - format(TheLine.Children, DryRun); - } else { - // Format the first token if necessary, and notify the WhitespaceManager - // about the unchanged whitespace. - for (FormatToken *Tok = TheLine.First; Tok; Tok = Tok->Next) { - if (Tok == TheLine.First && (Tok->NewlinesBefore > 0 || Tok->IsFirst)) { - unsigned LevelIndent = Tok->OriginalColumn; - if (!DryRun) { - // Remove trailing whitespace of the previous line. - if ((PreviousLine && PreviousLine->Affected) || - TheLine.LeadingEmptyLinesAffected) { - formatFirstToken(*Tok, PreviousLine, TheLine.Level, LevelIndent, - TheLine.InPPDirective); - } else { - Whitespaces->addUntouchableToken(*Tok, TheLine.InPPDirective); - } - } - - if (static_cast<int>(LevelIndent) - Offset >= 0) - LevelIndent -= Offset; - if (Tok->isNot(tok::comment) && !TheLine.InPPDirective) - IndentForLevel[TheLine.Level] = LevelIndent; - } else if (!DryRun) { + // Notify the WhitespaceManager about the unchanged whitespace. + for (FormatToken *Tok = TheLine.First->Next; Tok; Tok = Tok->Next) Whitespaces->addUntouchableToken(*Tok, TheLine.InPPDirective); - } - } - } - if (!DryRun) { - for (FormatToken *Tok = TheLine.First; Tok; Tok = Tok->Next) { - Tok->Finalized = true; } + NextLine = Joiner.getNextMergedLine(DryRun, IndentTracker); } - PreviousLine = *I; + if (!DryRun) + markFinalized(TheLine.First); + PreviousLine = &TheLine; } PenaltyCache[CacheKey] = Penalty; return Penalty; } -unsigned UnwrappedLineFormatter::format(const AnnotatedLine &Line, - unsigned FirstIndent, bool DryRun) { - LineState State = Indenter->getInitialState(FirstIndent, &Line, DryRun); - - // If the ObjC method declaration does not fit on a line, we should format - // it with one arg per line. - if (State.Line->Type == LT_ObjCMethodDecl) - State.Stack.back().BreakBeforeParameter = true; - - // Find best solution in solution space. - return analyzeSolutionSpace(State, DryRun); -} - void UnwrappedLineFormatter::formatFirstToken(FormatToken &RootToken, const AnnotatedLine *PreviousLine, unsigned IndentLevel, unsigned Indent, bool InPPDirective) { + if (RootToken.is(tok::eof)) { + unsigned Newlines = std::min(RootToken.NewlinesBefore, 1u); + Whitespaces->replaceWhitespace(RootToken, Newlines, /*IndentLevel=*/0, + /*Spaces=*/0, /*TargetColumn=*/0); + return; + } unsigned Newlines = std::min(RootToken.NewlinesBefore, Style.MaxEmptyLinesToKeep + 1); // Remove empty lines before "}" where applicable. @@ -496,7 +919,8 @@ void UnwrappedLineFormatter::formatFirstToken(FormatToken &RootToken, ++Newlines; // Remove empty lines after access specifiers. - if (PreviousLine && PreviousLine->First->isAccessSpecifier()) + if (PreviousLine && PreviousLine->First->isAccessSpecifier() && + (!PreviousLine->InPPDirective || !RootToken.HasUnescapedNewline)) Newlines = std::min(1u, Newlines); Whitespaces->replaceWhitespace(RootToken, Newlines, IndentLevel, Indent, @@ -504,202 +928,21 @@ void UnwrappedLineFormatter::formatFirstToken(FormatToken &RootToken, !RootToken.HasUnescapedNewline); } -/// \brief Get the indent of \p Level from \p IndentForLevel. -/// -/// \p IndentForLevel must contain the indent for the level \c l -/// at \p IndentForLevel[l], or a value < 0 if the indent for -/// that level is unknown. 
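formatFirstToken() now also owns the end-of-file case the main loop handled before: at eof every run of blank lines collapses to at most one newline, and elsewhere the count is clamped by MaxEmptyLinesToKeep. The clamping rule, restated:

  #include <algorithm>

  // Returns how many newline characters to emit before a token.
  // MaxEmptyLinesToKeep + 1 newline characters correspond to
  // MaxEmptyLinesToKeep blank lines between two tokens.
  unsigned newlinesToKeep(unsigned NewlinesBefore,
                          unsigned MaxEmptyLinesToKeep, bool IsEOF) {
    if (IsEOF)
      return std::min(NewlinesBefore, 1u); // Strip trailing blank lines.
    return std::min(NewlinesBefore, MaxEmptyLinesToKeep + 1);
  }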
-unsigned UnwrappedLineFormatter::getIndent(ArrayRef<int> IndentForLevel, - unsigned Level) { - if (IndentForLevel[Level] != -1) - return IndentForLevel[Level]; - if (Level == 0) - return 0; - return getIndent(IndentForLevel, Level - 1) + Style.IndentWidth; -} - -void UnwrappedLineFormatter::join(AnnotatedLine &A, const AnnotatedLine &B) { - assert(!A.Last->Next); - assert(!B.First->Previous); - if (B.Affected) - A.Affected = true; - A.Last->Next = B.First; - B.First->Previous = A.Last; - B.First->CanBreakBefore = true; - unsigned LengthA = A.Last->TotalLength + B.First->SpacesRequiredBefore; - for (FormatToken *Tok = B.First; Tok; Tok = Tok->Next) { - Tok->TotalLength += LengthA; - A.Last = Tok; - } -} - -unsigned UnwrappedLineFormatter::analyzeSolutionSpace(LineState &InitialState, - bool DryRun) { - std::set<LineState *, CompareLineStatePointers> Seen; - - // Increasing count of \c StateNode items we have created. This is used to - // create a deterministic order independent of the container. - unsigned Count = 0; - QueueType Queue; - - // Insert start element into queue. - StateNode *Node = - new (Allocator.Allocate()) StateNode(InitialState, false, nullptr); - Queue.push(QueueItem(OrderedPenalty(0, Count), Node)); - ++Count; - - unsigned Penalty = 0; - - // While not empty, take first element and follow edges. - while (!Queue.empty()) { - Penalty = Queue.top().first.first; - StateNode *Node = Queue.top().second; - if (!Node->State.NextToken) { - DEBUG(llvm::dbgs() << "\n---\nPenalty for line: " << Penalty << "\n"); - break; - } - Queue.pop(); - - // Cut off the analysis of certain solutions if the analysis gets too - // complex. See description of IgnoreStackForComparison. - if (Count > 10000) - Node->State.IgnoreStackForComparison = true; - - if (!Seen.insert(&Node->State).second) - // State already examined with lower penalty. - continue; - - FormatDecision LastFormat = Node->State.NextToken->Decision; - if (LastFormat == FD_Unformatted || LastFormat == FD_Continue) - addNextStateToQueue(Penalty, Node, /*NewLine=*/false, &Count, &Queue); - if (LastFormat == FD_Unformatted || LastFormat == FD_Break) - addNextStateToQueue(Penalty, Node, /*NewLine=*/true, &Count, &Queue); - } - - if (Queue.empty()) { - // We were unable to find a solution, do nothing. - // FIXME: Add diagnostic? - DEBUG(llvm::dbgs() << "Could not find a solution.\n"); - return 0; - } - - // Reconstruct the solution. - if (!DryRun) - reconstructPath(InitialState, Queue.top().second); - - DEBUG(llvm::dbgs() << "Total number of analyzed states: " << Count << "\n"); - DEBUG(llvm::dbgs() << "---\n"); - - return Penalty; -} - -#ifndef NDEBUG -static void printLineState(const LineState &State) { - llvm::dbgs() << "State: "; - for (const ParenState &P : State.Stack) { - llvm::dbgs() << P.Indent << "|" << P.LastSpace << "|" << P.NestedBlockIndent - << " "; - } - llvm::dbgs() << State.NextToken->TokenText << "\n"; -} -#endif - -void UnwrappedLineFormatter::reconstructPath(LineState &State, - StateNode *Current) { - std::deque<StateNode *> Path; - // We do not need a break before the initial token. 
- while (Current->Previous) { - Path.push_front(Current); - Current = Current->Previous; - } - for (std::deque<StateNode *>::iterator I = Path.begin(), E = Path.end(); - I != E; ++I) { - unsigned Penalty = 0; - formatChildren(State, (*I)->NewLine, /*DryRun=*/false, Penalty); - Penalty += Indenter->addTokenToState(State, (*I)->NewLine, false); - - DEBUG({ - printLineState((*I)->Previous->State); - if ((*I)->NewLine) { - llvm::dbgs() << "Penalty for placing " - << (*I)->Previous->State.NextToken->Tok.getName() << ": " - << Penalty << "\n"; - } - }); - } -} - -void UnwrappedLineFormatter::addNextStateToQueue(unsigned Penalty, - StateNode *PreviousNode, - bool NewLine, unsigned *Count, - QueueType *Queue) { - if (NewLine && !Indenter->canBreak(PreviousNode->State)) - return; - if (!NewLine && Indenter->mustBreak(PreviousNode->State)) - return; - - StateNode *Node = new (Allocator.Allocate()) - StateNode(PreviousNode->State, NewLine, PreviousNode); - if (!formatChildren(Node->State, NewLine, /*DryRun=*/true, Penalty)) - return; - - Penalty += Indenter->addTokenToState(Node->State, NewLine, true); - - Queue->push(QueueItem(OrderedPenalty(Penalty, *Count), Node)); - ++(*Count); -} - -bool UnwrappedLineFormatter::formatChildren(LineState &State, bool NewLine, - bool DryRun, unsigned &Penalty) { - FormatToken &Previous = *State.NextToken->Previous; - const FormatToken *LBrace = State.NextToken->getPreviousNonComment(); - if (!LBrace || LBrace->isNot(tok::l_brace) || LBrace->BlockKind != BK_Block || - Previous.Children.size() == 0) - // The previous token does not open a block. Nothing to do. We don't - // assert so that we can simply call this function for all tokens. - return true; - - if (NewLine) { - int AdditionalIndent = State.Stack.back().Indent - - Previous.Children[0]->Level * Style.IndentWidth; - - Penalty += format(Previous.Children, DryRun, AdditionalIndent, - /*FixBadIndentation=*/true); - return true; - } - - if (Previous.Children[0]->First->MustBreakBefore) - return false; - - // Cannot merge multiple statements into a single line. - if (Previous.Children.size() > 1) - return false; - - // Cannot merge into one line if this line ends on a comment. - if (Previous.is(tok::comment)) - return false; - - // We can't put the closing "}" on a line with a trailing comment. - if (Previous.Children[0]->Last->isTrailingComment()) - return false; - - // If the child line exceeds the column limit, we wouldn't want to merge it. - // We add +2 for the trailing " }". - if (Style.ColumnLimit > 0 && - Previous.Children[0]->Last->TotalLength + State.Column + 2 > - Style.ColumnLimit) - return false; - - if (!DryRun) { - Whitespaces->replaceWhitespace( - *Previous.Children[0]->First, - /*Newlines=*/0, /*IndentLevel=*/0, /*Spaces=*/1, - /*StartOfTokenColumn=*/State.Column, State.Line->InPPDirective); - } - Penalty += format(*Previous.Children[0], State.Column + 1, DryRun); - - State.Column += 1 + Previous.Children[0]->Last->TotalLength; - return true; +unsigned +UnwrappedLineFormatter::getColumnLimit(bool InPPDirective, + const AnnotatedLine *NextLine) const { + // In preprocessor directives reserve two chars for trailing " \" if the + // next line continues the preprocessor directive. + bool ContinuesPPDirective = + InPPDirective && + // If there is no next line, this is likely a child line and the parent + // continues the preprocessor directive. 
+ (!NextLine || + (NextLine->InPPDirective && + // If there is an unescaped newline between this line and the next, the + // next line starts a new preprocessor directive. + !NextLine->First->HasUnescapedNewline)); + return Style.ColumnLimit - (ContinuesPPDirective ? 2 : 0); } } // namespace format diff --git a/contrib/llvm/tools/clang/lib/Format/UnwrappedLineFormatter.h b/contrib/llvm/tools/clang/lib/Format/UnwrappedLineFormatter.h index 3ae6dbc..da9aa1c 100644 --- a/contrib/llvm/tools/clang/lib/Format/UnwrappedLineFormatter.h +++ b/contrib/llvm/tools/clang/lib/Format/UnwrappedLineFormatter.h @@ -32,135 +32,39 @@ class UnwrappedLineFormatter { public: UnwrappedLineFormatter(ContinuationIndenter *Indenter, WhitespaceManager *Whitespaces, - const FormatStyle &Style) - : Indenter(Indenter), Whitespaces(Whitespaces), Style(Style) {} + const FormatStyle &Style, + const AdditionalKeywords &Keywords, + bool *IncompleteFormat) + : Indenter(Indenter), Whitespaces(Whitespaces), Style(Style), + Keywords(Keywords), IncompleteFormat(IncompleteFormat) {} - unsigned format(const SmallVectorImpl<AnnotatedLine *> &Lines, bool DryRun, - int AdditionalIndent = 0, bool FixBadIndentation = false); + /// \brief Format the current block and return the penalty. + unsigned format(const SmallVectorImpl<AnnotatedLine *> &Lines, + bool DryRun = false, int AdditionalIndent = 0, + bool FixBadIndentation = false); private: - /// \brief Formats an \c AnnotatedLine and returns the penalty. - /// - /// If \p DryRun is \c false, directly applies the changes. - unsigned format(const AnnotatedLine &Line, unsigned FirstIndent, - bool DryRun); - - /// \brief An edge in the solution space from \c Previous->State to \c State, - /// inserting a newline dependent on the \c NewLine. - struct StateNode { - StateNode(const LineState &State, bool NewLine, StateNode *Previous) - : State(State), NewLine(NewLine), Previous(Previous) {} - LineState State; - bool NewLine; - StateNode *Previous; - }; - - /// \brief A pair of <penalty, count> that is used to prioritize the BFS on. - /// - /// In case of equal penalties, we want to prefer states that were inserted - /// first. During state generation we make sure that we insert states first - /// that break the line as late as possible. - typedef std::pair<unsigned, unsigned> OrderedPenalty; - - /// \brief An item in the prioritized BFS search queue. The \c StateNode's - /// \c State has the given \c OrderedPenalty. - typedef std::pair<OrderedPenalty, StateNode *> QueueItem; - - /// \brief The BFS queue type. - typedef std::priority_queue<QueueItem, std::vector<QueueItem>, - std::greater<QueueItem> > QueueType; - - /// \brief Get the offset of the line relatively to the level. - /// - /// For example, 'public:' labels in classes are offset by 1 or 2 - /// characters to the left from their level. - int getIndentOffset(const FormatToken &RootToken) { - if (Style.Language == FormatStyle::LK_Java) - return 0; - if (RootToken.isAccessSpecifier(false) || RootToken.isObjCAccessSpecifier()) - return Style.AccessModifierOffset; - return 0; - } - /// \brief Add a new line and the required indent before the first Token /// of the \c UnwrappedLine if there was no structural parsing error. void formatFirstToken(FormatToken &RootToken, const AnnotatedLine *PreviousLine, unsigned IndentLevel, unsigned Indent, bool InPPDirective); - /// \brief Get the indent of \p Level from \p IndentForLevel. 
- /// - /// \p IndentForLevel must contain the indent for the level \c l - /// at \p IndentForLevel[l], or a value < 0 if the indent for - /// that level is unknown. - unsigned getIndent(ArrayRef<int> IndentForLevel, unsigned Level); - - void join(AnnotatedLine &A, const AnnotatedLine &B); - - unsigned getColumnLimit(bool InPPDirective) const { - // In preprocessor directives reserve two chars for trailing " \" - return Style.ColumnLimit - (InPPDirective ? 2 : 0); - } - - struct CompareLineStatePointers { - bool operator()(LineState *obj1, LineState *obj2) const { - return *obj1 < *obj2; - } - }; - - /// \brief Analyze the entire solution space starting from \p InitialState. - /// - /// This implements a variant of Dijkstra's algorithm on the graph that spans - /// the solution space (\c LineStates are the nodes). The algorithm tries to - /// find the shortest path (the one with lowest penalty) from \p InitialState - /// to a state where all tokens are placed. Returns the penalty. - /// - /// If \p DryRun is \c false, directly applies the changes. - unsigned analyzeSolutionSpace(LineState &InitialState, bool DryRun = false); - - void reconstructPath(LineState &State, StateNode *Current); - - /// \brief Add the following state to the analysis queue \c Queue. - /// - /// Assume the current state is \p PreviousNode and has been reached with a - /// penalty of \p Penalty. Insert a line break if \p NewLine is \c true. - void addNextStateToQueue(unsigned Penalty, StateNode *PreviousNode, - bool NewLine, unsigned *Count, QueueType *Queue); - - /// \brief If the \p State's next token is an r_brace closing a nested block, - /// format the nested block before it. - /// - /// Returns \c true if all children could be placed successfully and adapts - /// \p Penalty as well as \p State. If \p DryRun is false, also directly - /// creates changes using \c Whitespaces. - /// - /// The crucial idea here is that children always get formatted upon - /// encountering the closing brace right after the nested block. Now, if we - /// are currently trying to keep the "}" on the same line (i.e. \p NewLine is - /// \c false), the entire block has to be kept on the same line (which is only - /// possible if it fits on the line, only contains a single statement, etc. - /// - /// If \p NewLine is true, we format the nested block on separate lines, i.e. - /// break after the "{", format all lines with correct indentation and the put - /// the closing "}" on yet another new line. - /// - /// This enables us to keep the simple structure of the - /// \c UnwrappedLineFormatter, where we only have two options for each token: - /// break or don't break. - bool formatChildren(LineState &State, bool NewLine, bool DryRun, - unsigned &Penalty); - - ContinuationIndenter *Indenter; - WhitespaceManager *Whitespaces; - FormatStyle Style; - - llvm::SpecificBumpPtrAllocator<StateNode> Allocator; + /// \brief Returns the column limit for a line, taking into account whether we + /// need an escaped newline due to a continued preprocessor directive. + unsigned getColumnLimit(bool InPPDirective, const AnnotatedLine *NextLine) const; // Cache to store the penalty of formatting a vector of AnnotatedLines // starting from a specific additional offset. Improves performance if there // are many nested blocks. 
std::map<std::pair<const SmallVectorImpl<AnnotatedLine *> *, unsigned>, unsigned> PenaltyCache; + + ContinuationIndenter *Indenter; + WhitespaceManager *Whitespaces; + const FormatStyle &Style; + const AdditionalKeywords &Keywords; + bool *IncompleteFormat; }; } // end namespace format } // end namespace clang diff --git a/contrib/llvm/tools/clang/lib/Format/UnwrappedLineParser.cpp b/contrib/llvm/tools/clang/lib/Format/UnwrappedLineParser.cpp index ec04af5..939528f 100644 --- a/contrib/llvm/tools/clang/lib/Format/UnwrappedLineParser.cpp +++ b/contrib/llvm/tools/clang/lib/Format/UnwrappedLineParser.cpp @@ -14,7 +14,9 @@ //===----------------------------------------------------------------------===// #include "UnwrappedLineParser.h" +#include "llvm/ADT/STLExtras.h" #include "llvm/Support/Debug.h" +#include "llvm/Support/raw_ostream.h" #define DEBUG_TYPE "format-parser" @@ -56,22 +58,20 @@ private: class ScopedMacroState : public FormatTokenSource { public: ScopedMacroState(UnwrappedLine &Line, FormatTokenSource *&TokenSource, - FormatToken *&ResetToken, bool &StructuralError) + FormatToken *&ResetToken) : Line(Line), TokenSource(TokenSource), ResetToken(ResetToken), PreviousLineLevel(Line.Level), PreviousTokenSource(TokenSource), - StructuralError(StructuralError), - PreviousStructuralError(StructuralError), Token(nullptr) { + Token(nullptr) { TokenSource = this; Line.Level = 0; Line.InPPDirective = true; } - ~ScopedMacroState() { + ~ScopedMacroState() override { TokenSource = PreviousTokenSource; ResetToken = Token; Line.InPPDirective = false; Line.Level = PreviousLineLevel; - StructuralError = PreviousStructuralError; } FormatToken *getNextToken() override { @@ -110,8 +110,6 @@ private: FormatToken *&ResetToken; unsigned PreviousLineLevel; FormatTokenSource *PreviousTokenSource; - bool &StructuralError; - bool PreviousStructuralError; FormatToken *Token; }; @@ -206,9 +204,8 @@ UnwrappedLineParser::UnwrappedLineParser(const FormatStyle &Style, ArrayRef<FormatToken *> Tokens, UnwrappedLineConsumer &Callback) : Line(new UnwrappedLine), MustBreakBeforeNextToken(false), - CurrentLines(&Lines), StructuralError(false), Style(Style), - Keywords(Keywords), Tokens(nullptr), Callback(Callback), - AllTokens(Tokens), PPBranchLevel(-1) {} + CurrentLines(&Lines), Style(Style), Keywords(Keywords), Tokens(nullptr), + Callback(Callback), AllTokens(Tokens), PPBranchLevel(-1) {} void UnwrappedLineParser::reset() { PPBranchLevel = -1; @@ -219,11 +216,10 @@ void UnwrappedLineParser::reset() { PreprocessorDirectives.clear(); CurrentLines = &Lines; DeclarationScopeStack.clear(); - StructuralError = false; PPStack.clear(); } -bool UnwrappedLineParser::parse() { +void UnwrappedLineParser::parse() { IndexedTokenSource TokenSource(AllTokens); do { DEBUG(llvm::dbgs() << "----\n"); @@ -256,13 +252,15 @@ bool UnwrappedLineParser::parse() { } } while (!PPLevelBranchIndex.empty()); - return StructuralError; } void UnwrappedLineParser::parseFile() { - ScopedDeclarationState DeclarationState( - *Line, DeclarationScopeStack, - /*MustBeDeclaration=*/ !Line->InPPDirective); + // The top-level context in a file always has declarations, except for pre- + // processor directives and JavaScript files. + bool MustBeDeclaration = + !Line->InPPDirective && Style.Language != FormatStyle::LK_JavaScript; + ScopedDeclarationState DeclarationState(*Line, DeclarationScopeStack, + MustBeDeclaration); parseLevel(/*HasOpeningBrace=*/false); // Make sure to format the remaining tokens. 
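ScopedMacroState above loses its StructuralError plumbing, leaving a pure save-and-restore RAII wrapper around the line state while a preprocessor directive is parsed. Stripped to its essence (type names ours):

  struct Line { unsigned Level; bool InPPDirective; };

  class ScopedPPState {
    Line &L;
    unsigned PreviousLevel;

  public:
    explicit ScopedPPState(Line &L) : L(L), PreviousLevel(L.Level) {
      L.Level = 0;            // Directives are parsed at nesting level 0.
      L.InPPDirective = true;
    }
    ~ScopedPPState() {        // Restore the surrounding state on scope exit.
      L.InPPDirective = false;
      L.Level = PreviousLevel;
    }
  };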
flushComments(true); @@ -286,7 +284,6 @@ void UnwrappedLineParser::parseLevel(bool HasOpeningBrace) { case tok::r_brace: if (HasOpeningBrace) return; - StructuralError = true; nextToken(); addUnwrappedLine(); break; @@ -305,7 +302,7 @@ void UnwrappedLineParser::parseLevel(bool HasOpeningBrace) { } while (!eof()); } -void UnwrappedLineParser::calculateBraceTypes() { +void UnwrappedLineParser::calculateBraceTypes(bool ExpectClassBody) { // We'll parse forward through the tokens until we hit // a closing brace or eof - note that getNextToken() will // parse macros, so this will magically work inside macro @@ -328,6 +325,7 @@ void UnwrappedLineParser::calculateBraceTypes() { switch (Tok->Tok.getKind()) { case tok::l_brace: + Tok->BlockKind = BK_Unknown; LBraceStack.push_back(Tok); break; case tok::r_brace: @@ -351,9 +349,11 @@ void UnwrappedLineParser::calculateBraceTypes() { // // We exclude + and - as they can be ObjC visibility modifiers. ProbablyBracedList = - NextTok->isOneOf(tok::comma, tok::semi, tok::period, tok::colon, + NextTok->isOneOf(tok::comma, tok::period, tok::colon, tok::r_paren, tok::r_square, tok::l_brace, tok::l_paren, tok::ellipsis) || + (NextTok->is(tok::semi) && + (!ExpectClassBody || LBraceStack.size() != 1)) || (NextTok->isBinaryOperator() && !NextIsObjCMethod); } if (ProbablyBracedList) { @@ -374,6 +374,7 @@ void UnwrappedLineParser::calculateBraceTypes() { case tok::kw_for: case tok::kw_switch: case tok::kw_try: + case tok::kw___try: if (!LBraceStack.empty()) LBraceStack.back()->BlockKind = BK_Block; break; @@ -407,7 +408,6 @@ void UnwrappedLineParser::parseBlock(bool MustBeDeclaration, bool AddLevel, if (!FormatTok->Tok.is(tok::r_brace)) { Line->Level = InitialLevel; - StructuralError = true; return; } @@ -417,7 +417,7 @@ void UnwrappedLineParser::parseBlock(bool MustBeDeclaration, bool AddLevel, Line->Level = InitialLevel; } -static bool IsGoogScope(const UnwrappedLine &Line) { +static bool isGoogScope(const UnwrappedLine &Line) { // FIXME: Closure-library specific stuff should not be hard-coded but be // configurable. if (Line.Tokens.size() < 4) @@ -453,12 +453,13 @@ void UnwrappedLineParser::parseChildBlock() { nextToken(); { bool GoogScope = - Style.Language == FormatStyle::LK_JavaScript && IsGoogScope(*Line); + Style.Language == FormatStyle::LK_JavaScript && isGoogScope(*Line); ScopedLineState LineState(*this); ScopedDeclarationState DeclarationState(*Line, DeclarationScopeStack, /*MustBeDeclaration=*/false); Line->Level += GoogScope ? 0 : 1; parseLevel(/*HasOpeningBrace=*/true); + flushComments(isOnNewLine(*FormatTok)); Line->Level -= GoogScope ? 0 : 1; } nextToken(); @@ -466,7 +467,7 @@ void UnwrappedLineParser::parseChildBlock() { void UnwrappedLineParser::parsePPDirective() { assert(FormatTok->Tok.is(tok::hash) && "'#' expected"); - ScopedMacroState MacroState(*Line, Tokens, FormatTok, StructuralError); + ScopedMacroState MacroState(*Line, Tokens, FormatTok); nextToken(); if (!FormatTok->Tok.getIdentifierInfo()) { @@ -549,6 +550,7 @@ void UnwrappedLineParser::conditionalCompilationEnd() { void UnwrappedLineParser::parsePPIf(bool IfDef) { nextToken(); bool IsLiteralFalse = (FormatTok->Tok.isLiteral() && + FormatTok->Tok.getLiteralData() != nullptr && StringRef(FormatTok->Tok.getLiteralData(), FormatTok->Tok.getLength()) == "0") || FormatTok->Tok.is(tok::kw_false); @@ -602,7 +604,7 @@ void UnwrappedLineParser::parsePPUnknown() { // Here we blacklist certain tokens that are not usually the first token in an // unwrapped line. 
This is used in attempt to distinguish macro calls without // trailing semicolons from other constructs split to several lines. -bool tokenCanStartNewLine(clang::Token Tok) { +static bool tokenCanStartNewLine(const clang::Token &Tok) { // Semicolon can be a null-statement, l_square can be a start of a macro or // a C++11 attribute, but this doesn't seem to be common. return Tok.isNot(tok::semi) && Tok.isNot(tok::l_brace) && @@ -655,6 +657,11 @@ void UnwrappedLineParser::parseStructuralElement() { nextToken(); addUnwrappedLine(); return; + case tok::objc_try: + // This branch isn't strictly necessary (the kw_try case below would + // do this too after the tok::at is parsed above). But be explicit. + parseTryCatch(); + return; default: break; } @@ -662,10 +669,13 @@ void UnwrappedLineParser::parseStructuralElement() { case tok::kw_asm: nextToken(); if (FormatTok->is(tok::l_brace)) { + FormatTok->Type = TT_InlineASMBrace; nextToken(); while (FormatTok && FormatTok->isNot(tok::eof)) { if (FormatTok->is(tok::r_brace)) { + FormatTok->Type = TT_InlineASMBrace; nextToken(); + addUnwrappedLine(); break; } FormatTok->Finalized = true; @@ -686,7 +696,8 @@ void UnwrappedLineParser::parseStructuralElement() { case tok::kw_public: case tok::kw_protected: case tok::kw_private: - if (Style.Language == FormatStyle::LK_Java) + if (Style.Language == FormatStyle::LK_Java || + Style.Language == FormatStyle::LK_JavaScript) nextToken(); else parseAccessSpecifier(); @@ -712,6 +723,7 @@ void UnwrappedLineParser::parseStructuralElement() { parseCaseLabel(); return; case tok::kw_try: + case tok::kw___try: parseTryCatch(); return; case tok::kw_extern: @@ -725,11 +737,30 @@ void UnwrappedLineParser::parseStructuralElement() { } } break; + case tok::kw_export: + if (Style.Language == FormatStyle::LK_JavaScript) { + parseJavaScriptEs6ImportExport(); + return; + } + break; case tok::identifier: - if (FormatTok->IsForEachMacro) { + if (FormatTok->is(TT_ForEachMacro)) { parseForOrWhileLoop(); return; } + if (Style.Language == FormatStyle::LK_JavaScript && + FormatTok->is(Keywords.kw_import)) { + parseJavaScriptEs6ImportExport(); + return; + } + if (FormatTok->is(Keywords.kw_signals)) { + nextToken(); + if (FormatTok->is(tok::colon)) { + nextToken(); + addUnwrappedLine(); + } + return; + } // In all other cases, parse the declaration. break; default: @@ -806,26 +837,42 @@ void UnwrappedLineParser::parseStructuralElement() { parseTryCatch(); return; case tok::identifier: { - StringRef Text = FormatTok->TokenText; // Parse function literal unless 'function' is the first token in a line // in which case this should be treated as a free-standing function. - if (Style.Language == FormatStyle::LK_JavaScript && Text == "function" && - Line->Tokens.size() > 0) { + if (Style.Language == FormatStyle::LK_JavaScript && + FormatTok->is(Keywords.kw_function) && Line->Tokens.size() > 0) { tryToParseJSFunction(); break; } + if ((Style.Language == FormatStyle::LK_JavaScript || + Style.Language == FormatStyle::LK_Java) && + FormatTok->is(Keywords.kw_interface)) { + parseRecord(); + break; + } + + StringRef Text = FormatTok->TokenText; nextToken(); - if (Line->Tokens.size() == 1) { - if (FormatTok->Tok.is(tok::colon)) { + if (Line->Tokens.size() == 1 && + // JS doesn't have macros, and within classes colons indicate fields, + // not labels. 
+ Style.Language != FormatStyle::LK_JavaScript) { + if (FormatTok->Tok.is(tok::colon) && !Line->MustBeDeclaration) { parseLabel(); return; } // Recognize function-like macro usages without trailing semicolon as - // well as free-standing macrose like Q_OBJECT. + // well as free-standing macros like Q_OBJECT. bool FunctionLike = FormatTok->is(tok::l_paren); if (FunctionLike) parseParens(); - if (FormatTok->NewlinesBefore > 0 && + + bool FollowedByNewline = + CommentsBeforeNextToken.empty() + ? FormatTok->NewlinesBefore > 0 + : CommentsBeforeNextToken.front()->NewlinesBefore > 0; + + if (FollowedByNewline && (Text.size() >= 5 || FunctionLike) && tokenCanStartNewLine(FormatTok->Tok) && Text == Text.upper()) { addUnwrappedLine(); @@ -835,6 +882,17 @@ void UnwrappedLineParser::parseStructuralElement() { break; } case tok::equal: + // Fat arrows (=>) have tok::TokenKind tok::equal but TokenType + // TT_JsFatArrow. The always start an expression or a child block if + // followed by a curly. + if (FormatTok->is(TT_JsFatArrow)) { + nextToken(); + if (FormatTok->is(tok::l_brace)) { + parseChildBlock(); + } + break; + } + nextToken(); if (FormatTok->Tok.is(tok::l_brace)) { parseBracedList(); @@ -843,6 +901,9 @@ void UnwrappedLineParser::parseStructuralElement() { case tok::l_square: parseSquare(); break; + case tok::kw_new: + parseNew(); + break; default: nextToken(); break; @@ -952,22 +1013,48 @@ void UnwrappedLineParser::tryToParseJSFunction() { // Consume function name. if (FormatTok->is(tok::identifier)) - nextToken(); + nextToken(); if (FormatTok->isNot(tok::l_paren)) return; - nextToken(); - while (FormatTok->isNot(tok::l_brace)) { - // Err on the side of caution in order to avoid consuming the full file in - // case of incomplete code. - if (!FormatTok->isOneOf(tok::identifier, tok::comma, tok::r_paren, - tok::comment)) - return; + + // Parse formal parameter list. + parseBalanced(tok::l_paren, tok::r_paren); + + if (FormatTok->is(tok::colon)) { + // Parse a type definition. nextToken(); + + // Eat the type declaration. For braced inline object types, balance braces, + // otherwise just parse until finding an l_brace for the function body. + if (FormatTok->is(tok::l_brace)) { + parseBalanced(tok::l_brace, tok::r_brace); + } else { + while(FormatTok->isNot(tok::l_brace) && !eof()) { + nextToken(); + } + } } + parseChildBlock(); } +void UnwrappedLineParser::parseBalanced(tok::TokenKind OpenKind, + tok::TokenKind CloseKind) { + assert(FormatTok->is(OpenKind)); + nextToken(); + int Depth = 1; + while (Depth > 0 && !eof()) { + // Parse the formal parameter list. + if (FormatTok->is(OpenKind)) { + ++Depth; + } else if (FormatTok->is(CloseKind)) { + --Depth; + } + nextToken(); + } +} + bool UnwrappedLineParser::tryToParseBracedList() { if (FormatTok->BlockKind == BK_Unknown) calculateBraceTypes(); @@ -985,10 +1072,19 @@ bool UnwrappedLineParser::parseBracedList(bool ContinueOnSemicolons) { // FIXME: Once we have an expression parser in the UnwrappedLineParser, // replace this by using parseAssigmentExpression() inside. do { - if (Style.Language == FormatStyle::LK_JavaScript && - FormatTok->is(Keywords.kw_function)) { - tryToParseJSFunction(); - continue; + if (Style.Language == FormatStyle::LK_JavaScript) { + if (FormatTok->is(Keywords.kw_function)) { + tryToParseJSFunction(); + continue; + } else if (FormatTok->is(TT_JsFatArrow)) { + nextToken(); + // Fat arrows can be followed by simple expressions or by child blocks + // in curly braces. 
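The new parseBalanced() above replaces the ad-hoc token whitelist in tryToParseJSFunction() with plain depth counting, and its eof() check keeps incomplete code from consuming the rest of the file. The same idea over a toy token stream (assuming the stream ends with an Eof sentinel, as the parser's token source does):

  #include <cstddef>
  #include <vector>

  enum Tok { Open, Close, Other, Eof };

  // Returns the index just past the token matching Toks[I], or the Eof
  // position if the range never closes. Toks[I] must be the opener and
  // the stream must end with Eof.
  std::size_t skipBalanced(const std::vector<Tok> &Toks, std::size_t I) {
    int Depth = 1;
    ++I;
    while (Depth > 0 && Toks[I] != Eof) {
      if (Toks[I] == Open)
        ++Depth;
      else if (Toks[I] == Close)
        --Depth;
      ++I;
    }
    return I;
  }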
+ if (FormatTok->is(tok::l_brace)){ + parseChildBlock(); + continue; + } + } } switch (FormatTok->Tok.getKind()) { case tok::caret: @@ -1006,6 +1102,17 @@ bool UnwrappedLineParser::parseBracedList(bool ContinueOnSemicolons) { FormatTok->BlockKind = BK_BracedInit; parseBracedList(); break; + case tok::r_paren: + // JavaScript can just have free standing methods and getters/setters in + // object literals. Detect them by a "{" following ")". + if (Style.Language == FormatStyle::LK_JavaScript) { + nextToken(); + if (FormatTok->is(tok::l_brace)) + parseChildBlock(); + break; + } + nextToken(); + break; case tok::r_brace: nextToken(); return !HasError; @@ -1046,9 +1153,8 @@ void UnwrappedLineParser::parseParens() { tryToParseLambda(); break; case tok::l_brace: - if (!tryToParseBracedList()) { + if (!tryToParseBracedList()) parseChildBlock(); - } break; case tok::at: nextToken(); @@ -1088,9 +1194,8 @@ void UnwrappedLineParser::parseSquare() { parseSquare(); break; case tok::l_brace: { - if (!tryToParseBracedList()) { + if (!tryToParseBracedList()) parseChildBlock(); - } break; } case tok::at: @@ -1148,7 +1253,7 @@ void UnwrappedLineParser::parseIfThenElse() { } void UnwrappedLineParser::parseTryCatch() { - assert(FormatTok->is(tok::kw_try) && "'try' expected"); + assert(FormatTok->isOneOf(tok::kw_try, tok::kw___try) && "'try' expected"); nextToken(); bool NeedsUnwrappedLine = false; if (FormatTok->is(tok::colon)) { @@ -1158,8 +1263,6 @@ void UnwrappedLineParser::parseTryCatch() { nextToken(); if (FormatTok->is(tok::l_paren)) parseParens(); - else - StructuralError = true; if (FormatTok->is(tok::comma)) nextToken(); } @@ -1182,23 +1285,29 @@ void UnwrappedLineParser::parseTryCatch() { // The C++ standard requires a compound-statement after a try. // If there's none, we try to assume there's a structuralElement // and try to continue. - StructuralError = true; addUnwrappedLine(); ++Line->Level; parseStructuralElement(); --Line->Level; } - while (FormatTok->is(tok::kw_catch) || - ((Style.Language == FormatStyle::LK_Java || - Style.Language == FormatStyle::LK_JavaScript) && - FormatTok->is(Keywords.kw_finally))) { + while (1) { + if (FormatTok->is(tok::at)) + nextToken(); + if (!(FormatTok->isOneOf(tok::kw_catch, Keywords.kw___except, + tok::kw___finally) || + ((Style.Language == FormatStyle::LK_Java || + Style.Language == FormatStyle::LK_JavaScript) && + FormatTok->is(Keywords.kw_finally)) || + (FormatTok->Tok.isObjCAtKeyword(tok::objc_catch) || + FormatTok->Tok.isObjCAtKeyword(tok::objc_finally)))) + break; nextToken(); while (FormatTok->isNot(tok::l_brace)) { if (FormatTok->is(tok::l_paren)) { parseParens(); continue; } - if (FormatTok->isOneOf(tok::semi, tok::r_brace)) + if (FormatTok->isOneOf(tok::semi, tok::r_brace, tok::eof)) return; nextToken(); } @@ -1242,9 +1351,33 @@ void UnwrappedLineParser::parseNamespace() { // FIXME: Add error handling. } +void UnwrappedLineParser::parseNew() { + assert(FormatTok->is(tok::kw_new) && "'new' expected"); + nextToken(); + if (Style.Language != FormatStyle::LK_Java) + return; + + // In Java, we can parse everything up to the parens, which aren't optional. + do { + // There should not be a ;, { or } before the new's open paren. + if (FormatTok->isOneOf(tok::semi, tok::l_brace, tok::r_brace)) + return; + + // Consume the parens. + if (FormatTok->is(tok::l_paren)) { + parseParens(); + + // If there is a class body of an anonymous class, consume that as child. 
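parseTryCatch() above is rewritten from a while-condition into an explicit loop so it can accept the full family of follow-up clauses. The acceptance test, modelled as a predicate (ours; the real code matches token kinds, not spellings):

  #include <string>

  bool isCatchFamilyKeyword(const std::string &Kw, bool IsJavaOrJS,
                            bool AfterAtSign) {
    // C++ 'catch' and the MSVC SEH clauses are accepted in any language.
    if (Kw == "catch" || Kw == "__except" || Kw == "__finally")
      return true;
    // 'finally' is only a keyword in Java and JavaScript.
    if (IsJavaOrJS && Kw == "finally")
      return true;
    // Objective-C spells these @catch / @finally; the '@' has already
    // been consumed when this check runs.
    return AfterAtSign && (Kw == "catch" || Kw == "finally");
  }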
+ if (FormatTok->is(tok::l_brace)) + parseChildBlock(); + return; + } + nextToken(); + } while (!eof()); +} + void UnwrappedLineParser::parseForOrWhileLoop() { - assert((FormatTok->Tok.is(tok::kw_for) || FormatTok->Tok.is(tok::kw_while) || - FormatTok->IsForEachMacro) && + assert(FormatTok->isOneOf(tok::kw_for, tok::kw_while, TT_ForEachMacro) && "'for', 'while' or foreach macro expected"); nextToken(); if (FormatTok->Tok.is(tok::l_paren)) @@ -1304,6 +1437,8 @@ void UnwrappedLineParser::parseLabel() { } addUnwrappedLine(); } else { + if (FormatTok->is(tok::semi)) + nextToken(); addUnwrappedLine(); } Line->Level = OldLineLevel; @@ -1338,8 +1473,7 @@ void UnwrappedLineParser::parseSwitch() { void UnwrappedLineParser::parseAccessSpecifier() { nextToken(); // Understand Qt's slots. - if (FormatTok->is(tok::identifier) && - (FormatTok->TokenText == "slots" || FormatTok->TokenText == "Q_SLOTS")) + if (FormatTok->isOneOf(Keywords.kw_slots, Keywords.kw_qslots)) nextToken(); // Otherwise, we don't know what it is, and we'd better keep the next token. if (FormatTok->Tok.is(tok::colon)) @@ -1455,37 +1589,45 @@ void UnwrappedLineParser::parseJavaEnumBody() { void UnwrappedLineParser::parseRecord() { const FormatToken &InitialToken = *FormatTok; nextToken(); - if (FormatTok->isOneOf(tok::identifier, tok::coloncolon, tok::kw___attribute, - tok::kw___declspec, tok::kw_alignas)) { + + + // The actual identifier can be a nested name specifier, and in macros + // it is often token-pasted. + while (FormatTok->isOneOf(tok::identifier, tok::coloncolon, tok::hashhash, + tok::kw___attribute, tok::kw___declspec, + tok::kw_alignas) || + ((Style.Language == FormatStyle::LK_Java || + Style.Language == FormatStyle::LK_JavaScript) && + FormatTok->isOneOf(tok::period, tok::comma))) { + bool IsNonMacroIdentifier = + FormatTok->is(tok::identifier) && + FormatTok->TokenText != FormatTok->TokenText.upper(); nextToken(); // We can have macros or attributes in between 'class' and the class name. - if (FormatTok->Tok.is(tok::l_paren)) { + if (!IsNonMacroIdentifier && FormatTok->Tok.is(tok::l_paren)) parseParens(); - } - // The actual identifier can be a nested name specifier, and in macros - // it is often token-pasted. - while (FormatTok->is(tok::identifier) || FormatTok->is(tok::coloncolon) || - FormatTok->is(tok::hashhash) || - (Style.Language == FormatStyle::LK_Java && - FormatTok->isOneOf(tok::period, tok::comma))) - nextToken(); + } - // Note that parsing away template declarations here leads to incorrectly - // accepting function declarations as record declarations. - // In general, we cannot solve this problem. Consider: - // class A<int> B() {} - // which can be a function definition or a class definition when B() is a - // macro. If we find enough real-world cases where this is a problem, we - // can parse for the 'template' keyword in the beginning of the statement, - // and thus rule out the record production in case there is no template - // (this would still leave us with an ambiguity between template function - // and class declarations). - if (FormatTok->Tok.is(tok::colon) || FormatTok->Tok.is(tok::less)) { - while (!eof() && FormatTok->Tok.isNot(tok::l_brace)) { - if (FormatTok->Tok.is(tok::semi)) - return; - nextToken(); + // Note that parsing away template declarations here leads to incorrectly + // accepting function declarations as record declarations. + // In general, we cannot solve this problem. 
Consider: + // class A<int> B() {} + // which can be a function definition or a class definition when B() is a + // macro. If we find enough real-world cases where this is a problem, we + // can parse for the 'template' keyword in the beginning of the statement, + // and thus rule out the record production in case there is no template + // (this would still leave us with an ambiguity between template function + // and class declarations). + if (FormatTok->isOneOf(tok::colon, tok::less)) { + while (!eof()) { + if (FormatTok->is(tok::l_brace)) { + calculateBraceTypes(/*ExpectClassBody=*/true); + if (!tryToParseBracedList()) + break; } + if (FormatTok->Tok.is(tok::semi)) + return; + nextToken(); } } if (FormatTok->Tok.is(tok::l_brace)) { @@ -1498,8 +1640,9 @@ void UnwrappedLineParser::parseRecord() { // We fall through to parsing a structural element afterwards, so // class A {} n, m; // will end up in one unwrapped line. - // This does not apply for Java. - if (Style.Language == FormatStyle::LK_Java) + // This does not apply for Java and JavaScript. + if (Style.Language == FormatStyle::LK_Java || + Style.Language == FormatStyle::LK_JavaScript) addUnwrappedLine(); } @@ -1578,6 +1721,35 @@ void UnwrappedLineParser::parseObjCProtocol() { parseObjCUntilAtEnd(); } +void UnwrappedLineParser::parseJavaScriptEs6ImportExport() { + assert(FormatTok->isOneOf(Keywords.kw_import, tok::kw_export)); + nextToken(); + + // Consume the "default" in "export default class/function". + if (FormatTok->is(tok::kw_default)) + nextToken(); + + // Consume "function" and "default function", so that these get parsed as + // free-standing JS functions, i.e. do not require a trailing semicolon. + if (FormatTok->is(Keywords.kw_function)) { + nextToken(); + return; + } + + if (FormatTok->isOneOf(tok::kw_const, tok::kw_class, Keywords.kw_var)) + return; // Fall through to parsing the corresponding structure. + + if (FormatTok->is(tok::l_brace)) { + FormatTok->BlockKind = BK_Block; + parseBracedList(); + } + + while (!eof() && FormatTok->isNot(tok::semi) && + FormatTok->isNot(tok::l_brace)) { + nextToken(); + } +} + LLVM_ATTRIBUTE_UNUSED static void printDebugInfo(const UnwrappedLine &Line, StringRef Prefix = "") { llvm::dbgs() << Prefix << "Line(" << Line.Level << ")" @@ -1634,14 +1806,12 @@ void UnwrappedLineParser::flushComments(bool NewlineBeforeNext) { I = CommentsBeforeNextToken.begin(), E = CommentsBeforeNextToken.end(); I != E; ++I) { - if (isOnNewLine(**I) && JustComments) { + if (isOnNewLine(**I) && JustComments) addUnwrappedLine(); - } pushToken(*I); } - if (NewlineBeforeNext && JustComments) { + if (NewlineBeforeNext && JustComments) addUnwrappedLine(); - } CommentsBeforeNextToken.clear(); } @@ -1662,8 +1832,7 @@ void UnwrappedLineParser::readToken() { (FormatTok->HasUnescapedNewline || FormatTok->IsFirst)) { // If there is an unfinished unwrapped line, we flush the preprocessor // directives only after that unwrapped line was finished later. 
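An aside on the IsNonMacroIdentifier test in parseRecord above: it leans on the convention that an ALL_CAPS identifier between 'class' and the real record name is probably a macro, so a '(' after it is consumed as a macro argument list. A tiny sketch of the heuristic, approximating the TokenText != TokenText.upper() comparison with a lowercase scan:

#include <algorithm>
#include <cassert>
#include <cctype>
#include <string>

// An identifier with any lowercase letter is taken as the real record
// name; an ALL_CAPS one may be a macro, so parens after it get consumed.
static bool isNonMacroIdentifier(const std::string &Text) {
  return std::any_of(Text.begin(), Text.end(),
                     [](unsigned char C) { return std::islower(C) != 0; });
}

int main() {
  assert(isNonMacroIdentifier("MyWidget"));    // real class name
  assert(!isNonMacroIdentifier("EXPORT_API")); // macro-like, parens skipped
  return 0;
}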
- bool SwitchToPreprocessorLines = - !Line->Tokens.empty() && CurrentLines == &Lines; + bool SwitchToPreprocessorLines = !Line->Tokens.empty(); ScopedLineState BlockState(*this, SwitchToPreprocessorLines); // Comments stored before the preprocessor directive need to be output // before the preprocessor directive, at the same level as the diff --git a/contrib/llvm/tools/clang/lib/Format/UnwrappedLineParser.h b/contrib/llvm/tools/clang/lib/Format/UnwrappedLineParser.h index 3218afe..6a6e56f 100644 --- a/contrib/llvm/tools/clang/lib/Format/UnwrappedLineParser.h +++ b/contrib/llvm/tools/clang/lib/Format/UnwrappedLineParser.h @@ -65,8 +65,7 @@ public: ArrayRef<FormatToken *> Tokens, UnwrappedLineConsumer &Callback); - /// Returns true in case of a structural error. - bool parse(); + void parse(); private: void reset(); @@ -95,6 +94,7 @@ private: void parseCaseLabel(); void parseSwitch(); void parseNamespace(); + void parseNew(); void parseAccessSpecifier(); void parseEnum(); void parseJavaEnumBody(); @@ -103,16 +103,22 @@ private: void parseObjCUntilAtEnd(); void parseObjCInterfaceOrImplementation(); void parseObjCProtocol(); + void parseJavaScriptEs6ImportExport(); bool tryToParseLambda(); bool tryToParseLambdaIntroducer(); void tryToParseJSFunction(); + /// \brief Parses tokens until encountering the CloseKind token, but balances + /// tokens when encountering more OpenKind tokens. Useful for e.g. parsing a + /// curly brace delimited block that can contain nested blocks. + /// The parser must be positioned on a token of OpenKind. + void parseBalanced(tok::TokenKind OpenKind, tok::TokenKind CloseKind); void addUnwrappedLine(); bool eof() const; void nextToken(); void readToken(); void flushComments(bool NewlineBeforeNext); void pushToken(FormatToken *Tok); - void calculateBraceTypes(); + void calculateBraceTypes(bool ExpectClassBody = false); // Marks a conditional compilation edge (for example, an '#if', '#ifdef', // '#else' or merge conflict marker). If 'Unreachable' is true, assumes @@ -156,10 +162,6 @@ private: // whether we are in a compound statement or not. std::vector<bool> DeclarationScopeStack; - // Will be true if we encounter an error that leads to possibily incorrect - // indentation levels. - bool StructuralError; - const FormatStyle &Style; const AdditionalKeywords &Keywords; diff --git a/contrib/llvm/tools/clang/lib/Format/WhitespaceManager.cpp b/contrib/llvm/tools/clang/lib/Format/WhitespaceManager.cpp index bf1207e..4baaab1 100644 --- a/contrib/llvm/tools/clang/lib/Format/WhitespaceManager.cpp +++ b/contrib/llvm/tools/clang/lib/Format/WhitespaceManager.cpp @@ -36,7 +36,9 @@ WhitespaceManager::Change::Change( PreviousLinePostfix(PreviousLinePostfix), CurrentLinePrefix(CurrentLinePrefix), Kind(Kind), ContinuesPPDirective(ContinuesPPDirective), IndentLevel(IndentLevel), - Spaces(Spaces) {} + Spaces(Spaces), IsTrailingComment(false), TokenLength(0), + PreviousEndOfTokenColumn(0), EscapedNewlineColumn(0), + StartOfBlockComment(nullptr), IndentationOffset(0) {} void WhitespaceManager::reset() { Changes.clear(); @@ -91,6 +93,7 @@ const tooling::Replacements &WhitespaceManager::generateReplacements() { std::sort(Changes.begin(), Changes.end(), Change::IsBeforeInFile(SourceMgr)); calculateLineBreakInformation(); + alignConsecutiveAssignments(); alignTrailingComments(); alignEscapedNewlines(); generateChanges(); @@ -139,6 +142,96 @@ void WhitespaceManager::calculateLineBreakInformation() { } } +// Walk through all of the changes and find sequences of "=" to align. 
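An aside before the Change-based implementation that follows: the pass is two-phase, first scanning lines for '=' and recording the maximum '=' column of a run, then padding each '=' out to that column. A condensed sketch over plain strings (hypothetical helper; the sequence-breaking rules for blank lines and parens are omitted):

#include <algorithm>
#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

// Phase 1: find each line's first '=' and the maximum '=' column.
// Phase 2: pad every '=' out to that column.
static void alignEquals(std::vector<std::string> &Lines) {
  std::size_t MaxCol = 0;
  std::vector<std::size_t> Cols;
  for (const std::string &L : Lines) {
    Cols.push_back(L.find('='));
    if (Cols.back() != std::string::npos)
      MaxCol = std::max(MaxCol, Cols.back());
  }
  for (std::size_t I = 0; I < Lines.size(); ++I)
    if (Cols[I] != std::string::npos)
      Lines[I].insert(Cols[I], MaxCol - Cols[I], ' ');
}

int main() {
  std::vector<std::string> Lines = {"int x = 1;", "unsigned longer = 2;"};
  alignEquals(Lines);
  for (const std::string &L : Lines)
    std::cout << L << "\n"; // "int x           = 1;" / "unsigned longer = 2;"
  return 0;
}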
To do +// so, keep track of the lines and whether or not an "=" was found on them. If +// a "=" is found on a line, extend the current sequence. If the current line +// cannot be part of a sequence, e.g. because there is an empty line before it +// or it contains non-assignments, finalize the previous sequence. +void WhitespaceManager::alignConsecutiveAssignments() { + if (!Style.AlignConsecutiveAssignments) + return; + + unsigned MinColumn = 0; + unsigned StartOfSequence = 0; + unsigned EndOfSequence = 0; + bool FoundAssignmentOnLine = false; + bool FoundLeftParenOnLine = false; + unsigned CurrentLine = 0; + + auto AlignSequence = [&] { + alignConsecutiveAssignments(StartOfSequence, EndOfSequence, MinColumn); + MinColumn = 0; + StartOfSequence = 0; + EndOfSequence = 0; + }; + + for (unsigned i = 0, e = Changes.size(); i != e; ++i) { + if (Changes[i].NewlinesBefore != 0) { + CurrentLine += Changes[i].NewlinesBefore; + if (StartOfSequence > 0 && + (Changes[i].NewlinesBefore > 1 || !FoundAssignmentOnLine)) { + EndOfSequence = i; + AlignSequence(); + } + FoundAssignmentOnLine = false; + FoundLeftParenOnLine = false; + } + + if ((Changes[i].Kind == tok::equal && + (FoundAssignmentOnLine || ((Changes[i].NewlinesBefore > 0 || + Changes[i + 1].NewlinesBefore > 0)))) || + (!FoundLeftParenOnLine && Changes[i].Kind == tok::r_paren)) { + if (StartOfSequence > 0) + AlignSequence(); + } else if (Changes[i].Kind == tok::l_paren) { + FoundLeftParenOnLine = true; + if (!FoundAssignmentOnLine && StartOfSequence > 0) + AlignSequence(); + } else if (!FoundAssignmentOnLine && !FoundLeftParenOnLine && + Changes[i].Kind == tok::equal) { + FoundAssignmentOnLine = true; + EndOfSequence = i; + if (StartOfSequence == 0) + StartOfSequence = i; + + unsigned ChangeMinColumn = Changes[i].StartOfTokenColumn; + MinColumn = std::max(MinColumn, ChangeMinColumn); + } + } + + if (StartOfSequence > 0) { + EndOfSequence = Changes.size(); + AlignSequence(); + } +} + +void WhitespaceManager::alignConsecutiveAssignments(unsigned Start, + unsigned End, + unsigned Column) { + bool AlignedAssignment = false; + int PreviousShift = 0; + for (unsigned i = Start; i != End; ++i) { + int Shift = 0; + if (Changes[i].NewlinesBefore > 0) + AlignedAssignment = false; + if (!AlignedAssignment && Changes[i].Kind == tok::equal) { + Shift = Column - Changes[i].StartOfTokenColumn; + AlignedAssignment = true; + PreviousShift = Shift; + } + assert(Shift >= 0); + Changes[i].Spaces += Shift; + if (i + 1 != Changes.size()) + Changes[i + 1].PreviousEndOfTokenColumn += Shift; + Changes[i].StartOfTokenColumn += Shift; + if (AlignedAssignment) { + Changes[i].StartOfTokenColumn += PreviousShift; + if (i + 1 != Changes.size()) + Changes[i + 1].PreviousEndOfTokenColumn += PreviousShift; + } + } +} + void WhitespaceManager::alignTrailingComments() { unsigned MinColumn = 0; unsigned MaxColumn = UINT_MAX; @@ -264,6 +357,11 @@ void WhitespaceManager::alignEscapedNewlines(unsigned Start, unsigned End, void WhitespaceManager::generateChanges() { for (unsigned i = 0, e = Changes.size(); i != e; ++i) { const Change &C = Changes[i]; + if (i > 0) { + assert(Changes[i - 1].OriginalWhitespaceRange.getBegin() != + C.OriginalWhitespaceRange.getBegin() && + "Generating two replacements for the same location"); + } if (C.CreateReplacement) { std::string ReplacementText = C.PreviousLinePostfix; if (C.ContinuesPPDirective) diff --git a/contrib/llvm/tools/clang/lib/Format/WhitespaceManager.h b/contrib/llvm/tools/clang/lib/Format/WhitespaceManager.h index 28730d4..4bfc813
100644 --- a/contrib/llvm/tools/clang/lib/Format/WhitespaceManager.h +++ b/contrib/llvm/tools/clang/lib/Format/WhitespaceManager.h @@ -164,6 +164,13 @@ private: /// \c EscapedNewlineColumn for the first tokens or token parts in a line. void calculateLineBreakInformation(); + /// \brief Align consecutive assignments over all \c Changes. + void alignConsecutiveAssignments(); + + /// \brief Align consecutive assignments from change \p Start to change \p End at + /// the specified \p Column. + void alignConsecutiveAssignments(unsigned Start, unsigned End, unsigned Column); + /// \brief Align trailing comments over all \c Changes. void alignTrailingComments(); diff --git a/contrib/llvm/tools/clang/lib/Frontend/ASTConsumers.cpp b/contrib/llvm/tools/clang/lib/Frontend/ASTConsumers.cpp index f53c614..52776b6 100644 --- a/contrib/llvm/tools/clang/lib/Frontend/ASTConsumers.cpp +++ b/contrib/llvm/tools/clang/lib/Frontend/ASTConsumers.cpp @@ -21,7 +21,6 @@ #include "clang/Basic/Diagnostic.h" #include "clang/Basic/FileManager.h" #include "clang/Basic/SourceManager.h" -#include "llvm/IR/Module.h" #include "llvm/Support/Path.h" #include "llvm/Support/Timer.h" #include "llvm/Support/raw_ostream.h" diff --git a/contrib/llvm/tools/clang/lib/Frontend/ASTMerge.cpp b/contrib/llvm/tools/clang/lib/Frontend/ASTMerge.cpp index 216ac6a..b84df94 100644 --- a/contrib/llvm/tools/clang/lib/Frontend/ASTMerge.cpp +++ b/contrib/llvm/tools/clang/lib/Frontend/ASTMerge.cpp @@ -57,6 +57,7 @@ void ASTMergeAction::ExecuteAction() { /*MinimalImport=*/false); TranslationUnitDecl *TU = Unit->getASTContext().getTranslationUnitDecl(); + CI.getASTConsumer().Initialize(CI.getASTContext()); for (auto *D : TU->decls()) { // Don't re-import __va_list_tag, __builtin_va_list. if (const auto *ND = dyn_cast<NamedDecl>(D)) @@ -64,7 +65,12 @@ void ASTMergeAction::ExecuteAction() { if (II->isStr("__va_list_tag") || II->isStr("__builtin_va_list")) continue; - Importer.Import(D); + Decl *ToD = Importer.Import(D); + + if (ToD) { + DeclGroupRef DGR(ToD); + CI.getASTConsumer().HandleTopLevelDecl(DGR); + } } } diff --git a/contrib/llvm/tools/clang/lib/Frontend/ASTUnit.cpp b/contrib/llvm/tools/clang/lib/Frontend/ASTUnit.cpp index a3998fa..7226344 100644 --- a/contrib/llvm/tools/clang/lib/Frontend/ASTUnit.cpp +++ b/contrib/llvm/tools/clang/lib/Frontend/ASTUnit.cpp @@ -120,11 +120,10 @@ static OnDiskDataMap &getOnDiskDataMap() { static void cleanupOnDiskMapAtExit() { // Use the mutex because there can be an alive thread destroying an ASTUnit. llvm::MutexGuard Guard(getOnDiskMutex()); - OnDiskDataMap &M = getOnDiskDataMap(); - for (OnDiskDataMap::iterator I = M.begin(), E = M.end(); I != E; ++I) { + for (const auto &I : getOnDiskDataMap()) { // We don't worry about freeing the memory associated with OnDiskDataMap. // All we care about is erasing stale files. - I->second->Cleanup(); + I.second->Cleanup(); } } @@ -151,7 +150,7 @@ static void removeOnDiskEntry(const ASTUnit *AU) { OnDiskDataMap::iterator I = M.find(AU); if (I != M.end()) { I->second->Cleanup(); - M.erase(AU); + M.erase(I); } } @@ -164,8 +163,8 @@ static const std::string &getPreambleFile(const ASTUnit *AU) { } void OnDiskData::CleanTemporaryFiles() { - for (unsigned I = 0, N = TemporaryFiles.size(); I != N; ++I) - llvm::sys::fs::remove(TemporaryFiles[I]); + for (StringRef File : TemporaryFiles) + llvm::sys::fs::remove(File); TemporaryFiles.clear(); } @@ -354,26 +353,24 @@ void ASTUnit::CacheCodeCompletionResults() { // Translate global code completions into cached completions. 
llvm::DenseMap<CanQualType, unsigned> CompletionTypes; - - for (unsigned I = 0, N = Results.size(); I != N; ++I) { - switch (Results[I].Kind) { + + for (Result &R : Results) { + switch (R.Kind) { case Result::RK_Declaration: { bool IsNestedNameSpecifier = false; CachedCodeCompletionResult CachedResult; - CachedResult.Completion = Results[I].CreateCodeCompletionString(*TheSema, - *CachedCompletionAllocator, - CCTUInfo, - IncludeBriefCommentsInCodeCompletion); - CachedResult.ShowInContexts = getDeclShowContexts(Results[I].Declaration, - Ctx->getLangOpts(), - IsNestedNameSpecifier); - CachedResult.Priority = Results[I].Priority; - CachedResult.Kind = Results[I].CursorKind; - CachedResult.Availability = Results[I].Availability; + CachedResult.Completion = R.CreateCodeCompletionString( + *TheSema, *CachedCompletionAllocator, CCTUInfo, + IncludeBriefCommentsInCodeCompletion); + CachedResult.ShowInContexts = getDeclShowContexts( + R.Declaration, Ctx->getLangOpts(), IsNestedNameSpecifier); + CachedResult.Priority = R.Priority; + CachedResult.Kind = R.CursorKind; + CachedResult.Availability = R.Availability; // Keep track of the type of this completion in an ASTContext-agnostic // way. - QualType UsageType = getDeclUsageType(*Ctx, Results[I].Declaration); + QualType UsageType = getDeclUsageType(*Ctx, R.Declaration); if (UsageType.isNull()) { CachedResult.TypeClass = STC_Void; CachedResult.Type = 0; @@ -398,8 +395,8 @@ void ASTUnit::CacheCodeCompletionResults() { CachedCompletionResults.push_back(CachedResult); /// Handle nested-name-specifiers in C++. - if (TheSema->Context.getLangOpts().CPlusPlus && - IsNestedNameSpecifier && !Results[I].StartsNestedNameSpecifier) { + if (TheSema->Context.getLangOpts().CPlusPlus && IsNestedNameSpecifier && + !R.StartsNestedNameSpecifier) { // The contexts in which a nested-name-specifier can appear in C++. uint64_t NNSContexts = (1LL << CodeCompletionContext::CCC_TopLevel) @@ -415,8 +412,8 @@ void ASTUnit::CacheCodeCompletionResults() { | (1LL << CodeCompletionContext::CCC_PotentiallyQualifiedName) | (1LL << CodeCompletionContext::CCC_ParenthesizedExpression); - if (isa<NamespaceDecl>(Results[I].Declaration) || - isa<NamespaceAliasDecl>(Results[I].Declaration)) + if (isa<NamespaceDecl>(R.Declaration) || + isa<NamespaceAliasDecl>(R.Declaration)) NNSContexts |= (1LL << CodeCompletionContext::CCC_Namespace); if (unsigned RemainingContexts @@ -424,12 +421,10 @@ void ASTUnit::CacheCodeCompletionResults() { // If there any contexts where this completion can be a // nested-name-specifier but isn't already an option, create a // nested-name-specifier completion. 
- Results[I].StartsNestedNameSpecifier = true; - CachedResult.Completion - = Results[I].CreateCodeCompletionString(*TheSema, - *CachedCompletionAllocator, - CCTUInfo, - IncludeBriefCommentsInCodeCompletion); + R.StartsNestedNameSpecifier = true; + CachedResult.Completion = R.CreateCodeCompletionString( + *TheSema, *CachedCompletionAllocator, CCTUInfo, + IncludeBriefCommentsInCodeCompletion); CachedResult.ShowInContexts = RemainingContexts; CachedResult.Priority = CCP_NestedNameSpecifier; CachedResult.TypeClass = STC_Void; @@ -448,11 +443,9 @@ void ASTUnit::CacheCodeCompletionResults() { case Result::RK_Macro: { CachedCodeCompletionResult CachedResult; - CachedResult.Completion - = Results[I].CreateCodeCompletionString(*TheSema, - *CachedCompletionAllocator, - CCTUInfo, - IncludeBriefCommentsInCodeCompletion); + CachedResult.Completion = R.CreateCodeCompletionString( + *TheSema, *CachedCompletionAllocator, CCTUInfo, + IncludeBriefCommentsInCodeCompletion); CachedResult.ShowInContexts = (1LL << CodeCompletionContext::CCC_TopLevel) | (1LL << CodeCompletionContext::CCC_ObjCInterface) @@ -466,10 +459,10 @@ void ASTUnit::CacheCodeCompletionResults() { | (1LL << CodeCompletionContext::CCC_PreprocessorExpression) | (1LL << CodeCompletionContext::CCC_ParenthesizedExpression) | (1LL << CodeCompletionContext::CCC_OtherWithMacros); - - CachedResult.Priority = Results[I].Priority; - CachedResult.Kind = Results[I].CursorKind; - CachedResult.Availability = Results[I].Availability; + + CachedResult.Priority = R.Priority; + CachedResult.Kind = R.CursorKind; + CachedResult.Availability = R.Availability; CachedResult.TypeClass = STC_Void; CachedResult.Type = 0; CachedCompletionResults.push_back(CachedResult); @@ -520,8 +513,8 @@ public: return false; } - bool ReadTargetOptions(const TargetOptions &TargetOpts, - bool Complain) override { + bool ReadTargetOptions(const TargetOptions &TargetOpts, bool Complain, + bool AllowCompatibleDifferences) override { // If we've already initialized the target, don't do it again. if (Target) return false; @@ -689,8 +682,8 @@ std::unique_ptr<ASTUnit> ASTUnit::LoadFromASTFile( PreprocessorOptions *PPOpts = new PreprocessorOptions(); - for (unsigned I = 0, N = RemappedFiles.size(); I != N; ++I) - PPOpts->addRemappedFile(RemappedFiles[I].first, RemappedFiles[I].second); + for (const auto &RemappedFile : RemappedFiles) + PPOpts->addRemappedFile(RemappedFile.first, RemappedFile.second); // Gather Info for preprocessor construction later on. @@ -721,6 +714,13 @@ std::unique_ptr<ASTUnit> ASTUnit::LoadFromASTFile( *AST->PP, Context, AST->ASTFileLangOpts, AST->TargetOpts, AST->Target, Counter)); + // Attach the AST reader to the AST context as an external AST + // source, so that declarations will be deserialized from the + // AST file as needed. + // We need the external source to be set up before we read the AST, because + // eagerly-deserialized declarations may use it. + Context.setExternalSource(AST->Reader); + switch (AST->Reader->ReadAST(Filename, serialization::MK_MainFile, SourceLocation(), ASTReader::ARR_None)) { case ASTReader::Success: @@ -740,11 +740,6 @@ std::unique_ptr<ASTUnit> ASTUnit::LoadFromASTFile( PP.setCounterValue(Counter); - // Attach the AST reader to the AST context as an external AST - // source, so that declarations will be deserialized from the - // AST file as needed. - Context.setExternalSource(AST->Reader); - // Create an AST consumer, even though it isn't used. 
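An aside on the reordering above ('We need the external source to be set up before we read the AST, because eagerly-deserialized declarations may use it'): if reading a file can eagerly materialize declarations that consult the context's external source, attachment must precede the read. A toy model of that ordering constraint (all names hypothetical):

#include <cassert>

struct ExternalSource {
  int resolve(int Id) const { return Id * 2; } // stand-in for deserialization
};

struct Context {
  const ExternalSource *Source = nullptr;
  void setExternalSource(const ExternalSource *S) { Source = S; }
};

// Stand-in for ReadAST: eagerly-deserialized declarations may already
// need the external source while the file is still being read.
static int readFile(const Context &Ctx) {
  assert(Ctx.Source && "external source must be attached before reading");
  return Ctx.Source->resolve(21);
}

int main() {
  ExternalSource Reader;
  Context Ctx;
  Ctx.setExternalSource(&Reader);      // attach first...
  return readFile(Ctx) == 42 ? 0 : 1;  // ...then read
}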
AST->Consumer.reset(new ASTConsumer); @@ -853,8 +848,8 @@ public: } bool HandleTopLevelDecl(DeclGroupRef D) override { - for (DeclGroupRef::iterator it = D.begin(), ie = D.end(); it != ie; ++it) - handleTopLevelDecl(*it); + for (Decl *TopLevelDecl : D) + handleTopLevelDecl(TopLevelDecl); return true; } @@ -862,8 +857,8 @@ public: void HandleInterestingDecl(DeclGroupRef) override {} void HandleTopLevelDeclInObjCContainer(DeclGroupRef D) override { - for (DeclGroupRef::iterator it = D.begin(), ie = D.end(); it != ie; ++it) - handleTopLevelDecl(*it); + for (Decl *TopLevelDecl : D) + handleTopLevelDecl(TopLevelDecl); } ASTMutationListener *GetASTMutationListener() override { @@ -931,9 +926,8 @@ public: Hash = 0; } - bool HandleTopLevelDecl(DeclGroupRef D) override { - for (DeclGroupRef::iterator it = D.begin(), ie = D.end(); it != ie; ++it) { - Decl *D = *it; + bool HandleTopLevelDecl(DeclGroupRef DG) override { + for (Decl *D : DG) { // FIXME: Currently ObjC method declarations are incorrectly being // reported as top-level declarations, even though their DeclContext // is the containing ObjC @interface/@implementation. This is a @@ -953,8 +947,7 @@ public: // parsing into declaration IDs in the precompiled // preamble. This will allow us to deserialize those top-level // declarations when requested. - for (unsigned I = 0, N = TopLevelDecls.size(); I != N; ++I) { - Decl *D = TopLevelDecls[I]; + for (Decl *D : TopLevelDecls) { // Invalid top-level decls may not have been serialized. if (D->isInvalidDecl()) continue; @@ -973,9 +966,9 @@ PrecompilePreambleAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) { std::string Sysroot; std::string OutputFile; - raw_ostream *OS = nullptr; - if (GeneratePCHAction::ComputeASTConsumerArguments(CI, InFile, Sysroot, - OutputFile, OS)) + raw_ostream *OS = GeneratePCHAction::ComputeASTConsumerArguments( + CI, InFile, Sysroot, OutputFile); + if (!OS) return nullptr; if (!CI.getFrontendOpts().RelocatablePCH) @@ -1009,10 +1002,10 @@ static void checkAndSanitizeDiags(SmallVectorImpl<StoredDiagnostic> & // been careful to make sure that the source manager's state // before and after are identical, so that we can reuse the source // location itself. 
- for (unsigned I = 0, N = StoredDiagnostics.size(); I < N; ++I) { - if (StoredDiagnostics[I].getLocation().isValid()) { - FullSourceLoc Loc(StoredDiagnostics[I].getLocation(), SM); - StoredDiagnostics[I].setLocation(Loc); + for (StoredDiagnostic &SD : StoredDiagnostics) { + if (SD.getLocation().isValid()) { + FullSourceLoc Loc(SD.getLocation(), SM); + SD.setLocation(Loc); } } } @@ -1300,14 +1293,10 @@ makeStandaloneDiagnostic(const LangOptions &LangOpts, if (OutDiag.Filename.empty()) return OutDiag; OutDiag.LocOffset = SM.getFileOffset(FileLoc); - for (StoredDiagnostic::range_iterator - I = InDiag.range_begin(), E = InDiag.range_end(); I != E; ++I) { - OutDiag.Ranges.push_back(makeStandaloneRange(*I, SM, LangOpts)); - } - for (StoredDiagnostic::fixit_iterator I = InDiag.fixit_begin(), - E = InDiag.fixit_end(); - I != E; ++I) - OutDiag.FixIts.push_back(makeStandaloneFixIt(SM, LangOpts, *I)); + for (const CharSourceRange &Range : InDiag.getRanges()) + OutDiag.Ranges.push_back(makeStandaloneRange(Range, SM, LangOpts)); + for (const FixItHint &FixIt : InDiag.getFixIts()) + OutDiag.FixIts.push_back(makeStandaloneFixIt(SM, LangOpts, FixIt)); return OutDiag; } @@ -1634,11 +1623,10 @@ void ASTUnit::RealizeTopLevelDeclsFromPreamble() { std::vector<Decl *> Resolved; Resolved.reserve(TopLevelDeclsInPreamble.size()); ExternalASTSource &Source = *getASTContext().getExternalSource(); - for (unsigned I = 0, N = TopLevelDeclsInPreamble.size(); I != N; ++I) { + for (serialization::DeclID TopLevelDecl : TopLevelDeclsInPreamble) { // Resolve the declaration ID to an actual declaration, possibly // deserializing the declaration in the process. - Decl *D = Source.GetExternalDecl(TopLevelDeclsInPreamble[I]); - if (D) + if (Decl *D = Source.GetExternalDecl(TopLevelDecl)) Resolved.push_back(D); } TopLevelDeclsInPreamble.clear(); @@ -1943,9 +1931,9 @@ ASTUnit *ASTUnit::LoadFromCommandLine( } // Override any files that need remapping - for (unsigned I = 0, N = RemappedFiles.size(); I != N; ++I) { - CI->getPreprocessorOpts().addRemappedFile(RemappedFiles[I].first, - RemappedFiles[I].second); + for (const auto &RemappedFile : RemappedFiles) { + CI->getPreprocessorOpts().addRemappedFile(RemappedFile.first, + RemappedFile.second); } PreprocessorOptions &PPOpts = CI->getPreprocessorOpts(); PPOpts.RemappedFilesKeepOriginalName = RemappedFilesKeepOriginalName; @@ -2015,9 +2003,9 @@ bool ASTUnit::Reparse(ArrayRef<RemappedFile> RemappedFiles) { delete RB.second; Invocation->getPreprocessorOpts().clearRemappedFiles(); - for (unsigned I = 0, N = RemappedFiles.size(); I != N; ++I) { - Invocation->getPreprocessorOpts().addRemappedFile(RemappedFiles[I].first, - RemappedFiles[I].second); + for (const auto &RemappedFile : RemappedFiles) { + Invocation->getPreprocessorOpts().addRemappedFile(RemappedFile.first, + RemappedFile.second); } // If we have a preamble file lying around, or if we might try to @@ -2375,10 +2363,9 @@ void ASTUnit::CodeComplete(StringRef File, unsigned Line, unsigned Column, // Remap files. 
PreprocessorOpts.clearRemappedFiles(); PreprocessorOpts.RetainRemappedFileBuffers = true; - for (unsigned I = 0, N = RemappedFiles.size(); I != N; ++I) { - PreprocessorOpts.addRemappedFile(RemappedFiles[I].first, - RemappedFiles[I].second); - OwnedBuffers.push_back(RemappedFiles[I].second); + for (const auto &RemappedFile : RemappedFiles) { + PreprocessorOpts.addRemappedFile(RemappedFile.first, RemappedFile.second); + OwnedBuffers.push_back(RemappedFile.second); } // Use the code completion consumer we were given, but adding any cached @@ -2446,7 +2433,7 @@ bool ASTUnit::Save(StringRef File) { TempPath = File; TempPath += "-%%%%%%%%"; int fd; - if (llvm::sys::fs::createUniqueFile(TempPath.str(), fd, TempPath)) + if (llvm::sys::fs::createUniqueFile(TempPath, fd, TempPath)) return true; // FIXME: Can we somehow regenerate the stat cache here, or do we need to @@ -2460,8 +2447,8 @@ bool ASTUnit::Save(StringRef File) { return true; } - if (llvm::sys::fs::rename(TempPath.str(), File)) { - llvm::sys::fs::remove(TempPath.str()); + if (llvm::sys::fs::rename(TempPath, File)) { + llvm::sys::fs::remove(TempPath); return true; } @@ -2509,9 +2496,8 @@ void ASTUnit::TranslateStoredDiagnostics( SmallVector<StoredDiagnostic, 4> Result; Result.reserve(Diags.size()); - for (unsigned I = 0, N = Diags.size(); I != N; ++I) { + for (const StandaloneDiagnostic &SD : Diags) { // Rebuild the StoredDiagnostic. - const StandaloneDiagnostic &SD = Diags[I]; if (SD.Filename.empty()) continue; const FileEntry *FE = FileMgr.getFile(SD.Filename); @@ -2526,23 +2512,20 @@ void ASTUnit::TranslateStoredDiagnostics( SmallVector<CharSourceRange, 4> Ranges; Ranges.reserve(SD.Ranges.size()); - for (std::vector<std::pair<unsigned, unsigned> >::const_iterator - I = SD.Ranges.begin(), E = SD.Ranges.end(); I != E; ++I) { - SourceLocation BL = FileLoc.getLocWithOffset((*I).first); - SourceLocation EL = FileLoc.getLocWithOffset((*I).second); + for (const auto &Range : SD.Ranges) { + SourceLocation BL = FileLoc.getLocWithOffset(Range.first); + SourceLocation EL = FileLoc.getLocWithOffset(Range.second); Ranges.push_back(CharSourceRange::getCharRange(BL, EL)); } SmallVector<FixItHint, 2> FixIts; FixIts.reserve(SD.FixIts.size()); - for (std::vector<StandaloneFixIt>::const_iterator - I = SD.FixIts.begin(), E = SD.FixIts.end(); - I != E; ++I) { + for (const StandaloneFixIt &FixIt : SD.FixIts) { FixIts.push_back(FixItHint()); FixItHint &FH = FixIts.back(); - FH.CodeToInsert = I->CodeToInsert; - SourceLocation BL = FileLoc.getLocWithOffset(I->RemoveRange.first); - SourceLocation EL = FileLoc.getLocWithOffset(I->RemoveRange.second); + FH.CodeToInsert = FixIt.CodeToInsert; + SourceLocation BL = FileLoc.getLocWithOffset(FixIt.RemoveRange.first); + SourceLocation EL = FileLoc.getLocWithOffset(FixIt.RemoveRange.second); FH.RemoveRange = CharSourceRange::getCharRange(BL, EL); } @@ -2736,7 +2719,7 @@ SourceLocation ASTUnit::getStartOfMainFileID() { return SourceMgr->getLocForStartOfFile(FID); } -std::pair<PreprocessingRecord::iterator, PreprocessingRecord::iterator> +llvm::iterator_range<PreprocessingRecord::iterator> ASTUnit::getLocalPreprocessingEntities() const { if (isMainFileAST()) { serialization::ModuleFile & @@ -2745,20 +2728,18 @@ ASTUnit::getLocalPreprocessingEntities() const { } if (PreprocessingRecord *PPRec = PP->getPreprocessingRecord()) - return std::make_pair(PPRec->local_begin(), PPRec->local_end()); + return llvm::make_range(PPRec->local_begin(), PPRec->local_end()); - return std::make_pair(PreprocessingRecord::iterator(), - 
PreprocessingRecord::iterator()); + return llvm::make_range(PreprocessingRecord::iterator(), + PreprocessingRecord::iterator()); } bool ASTUnit::visitLocalTopLevelDecls(void *context, DeclVisitorFn Fn) { if (isMainFileAST()) { serialization::ModuleFile & Mod = Reader->getModuleManager().getPrimaryModule(); - ASTReader::ModuleDeclIterator MDI, MDE; - std::tie(MDI, MDE) = Reader->getModuleFileLevelDecls(Mod); - for (; MDI != MDE; ++MDI) { - if (!Fn(context, *MDI)) + for (const Decl *D : Reader->getModuleFileLevelDecls(Mod)) { + if (!Fn(context, D)) return false; } @@ -2821,11 +2802,8 @@ void ASTUnit::PreambleData::countLines() const { if (empty()) return; - for (std::vector<char>::const_iterator - I = Buffer.begin(), E = Buffer.end(); I != E; ++I) { - if (*I == '\n') - ++NumLines; - } + NumLines = std::count(Buffer.begin(), Buffer.end(), '\n'); + if (Buffer.back() != '\n') ++NumLines; } diff --git a/contrib/llvm/tools/clang/lib/Frontend/CacheTokens.cpp b/contrib/llvm/tools/clang/lib/Frontend/CacheTokens.cpp index d909d52..7d2a09c 100644 --- a/contrib/llvm/tools/clang/lib/Frontend/CacheTokens.cpp +++ b/contrib/llvm/tools/clang/lib/Frontend/CacheTokens.cpp @@ -183,7 +183,7 @@ class PTHWriter { typedef llvm::StringMap<OffsetOpt, llvm::BumpPtrAllocator> CachedStrsTy; IDMap IM; - llvm::raw_fd_ostream& Out; + raw_pwrite_stream &Out; Preprocessor& PP; uint32_t idcount; PTHMap PM; @@ -236,8 +236,8 @@ class PTHWriter { Offset EmitCachedSpellings(); public: - PTHWriter(llvm::raw_fd_ostream& out, Preprocessor& pp) - : Out(out), PP(pp), idcount(0), CurStrOffset(0) {} + PTHWriter(raw_pwrite_stream &out, Preprocessor &pp) + : Out(out), PP(pp), idcount(0), CurStrOffset(0) {} PTHMap &getPM() { return PM; } void GeneratePTH(const std::string &MainFile); @@ -468,6 +468,16 @@ Offset PTHWriter::EmitCachedSpellings() { return SpellingsOff; } +static uint32_t swap32le(uint32_t X) { + return llvm::support::endian::byte_swap<uint32_t, llvm::support::little>(X); +} + +static void pwrite32le(raw_pwrite_stream &OS, uint32_t Val, uint64_t &Off) { + uint32_t LEVal = swap32le(Val); + OS.pwrite(reinterpret_cast<const char *>(&LEVal), 4, Off); + Off += 4; +} + void PTHWriter::GeneratePTH(const std::string &MainFile) { // Generate the prologue. Out << "cfe-pth" << '\0'; @@ -520,11 +530,11 @@ void PTHWriter::GeneratePTH(const std::string &MainFile) { Offset FileTableOff = EmitFileTable(); // Finally, write the prologue. - Out.seek(PrologueOffset); - Emit32(IdTableOff.first); - Emit32(IdTableOff.second); - Emit32(FileTableOff); - Emit32(SpellingOff); + uint64_t Off = PrologueOffset; + pwrite32le(Out, IdTableOff.first, Off); + pwrite32le(Out, IdTableOff.second, Off); + pwrite32le(Out, FileTableOff, Off); + pwrite32le(Out, SpellingOff, Off); } namespace { @@ -537,7 +547,7 @@ class StatListener : public FileSystemStatCache { PTHMap &PM; public: StatListener(PTHMap &pm) : PM(pm) {} - ~StatListener() {} + ~StatListener() override {} LookupResult getStat(const char *Path, FileData &Data, bool isFile, std::unique_ptr<vfs::File> *F, @@ -559,8 +569,7 @@ public: }; } // end anonymous namespace - -void clang::CacheTokens(Preprocessor &PP, llvm::raw_fd_ostream* OS) { +void clang::CacheTokens(Preprocessor &PP, raw_pwrite_stream *OS) { // Get the name of the main file. 
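An aside on GeneratePTH above: it is classic back-patching, reserving fixed-width prologue slots, emitting the body while recording offsets, then overwriting the slots at a known position in explicit little-endian order so the file is host-independent. A minimal sketch against an in-memory buffer (a stand-in for raw_pwrite_stream::pwrite):

#include <cstddef>
#include <cstdint>
#include <vector>

// Overwrite four bytes at Off with Val, least significant byte first,
// then advance Off -- the same contract as pwrite32le above.
static void pwrite32le(std::vector<std::uint8_t> &Buf, std::uint32_t Val,
                       std::size_t &Off) {
  for (int I = 0; I < 4; ++I)
    Buf[Off + I] = static_cast<std::uint8_t>(Val >> (8 * I));
  Off += 4;
}

int main() {
  std::vector<std::uint8_t> File(8, 0); // prologue: two reserved 32-bit slots
  // ...body emitted here; suppose the tables ended up at these offsets:
  std::uint32_t IdTableOff = 0x12345678, FileTableOff = 0x9abcdef0;
  std::size_t Off = 0; // back-patch the prologue once offsets are known
  pwrite32le(File, IdTableOff, Off);
  pwrite32le(File, FileTableOff, Off);
  return File[0] == 0x78 && File[4] == 0xf0 ? 0 : 1; // little-endian layout
}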
const SourceManager &SrcMgr = PP.getSourceManager(); const FileEntry *MainFile = SrcMgr.getFileEntryForID(SrcMgr.getMainFileID()); diff --git a/contrib/llvm/tools/clang/lib/Frontend/ChainedIncludesSource.cpp b/contrib/llvm/tools/clang/lib/Frontend/ChainedIncludesSource.cpp index cb260b4..f3677f8 100644 --- a/contrib/llvm/tools/clang/lib/Frontend/ChainedIncludesSource.cpp +++ b/contrib/llvm/tools/clang/lib/Frontend/ChainedIncludesSource.cpp @@ -27,7 +27,7 @@ using namespace clang; namespace { class ChainedIncludesSource : public ExternalSemaSource { public: - virtual ~ChainedIncludesSource(); + ~ChainedIncludesSource() override; ExternalSemaSource &getFinalReader() const { return *FinalReader; } @@ -43,6 +43,7 @@ protected: Selector GetExternalSelector(uint32_t ID) override; uint32_t GetNumExternalSelectors() override; Stmt *GetExternalDeclStmt(uint64_t Offset) override; + CXXCtorInitializer **GetExternalCXXCtorInitializers(uint64_t Offset) override; CXXBaseSpecifier *GetExternalCXXBaseSpecifiers(uint64_t Offset) override; bool FindExternalVisibleDeclsByName(const DeclContext *DC, DeclarationName Name) override; @@ -232,6 +233,10 @@ CXXBaseSpecifier * ChainedIncludesSource::GetExternalCXXBaseSpecifiers(uint64_t Offset) { return getFinalReader().GetExternalCXXBaseSpecifiers(Offset); } +CXXCtorInitializer ** +ChainedIncludesSource::GetExternalCXXCtorInitializers(uint64_t Offset) { + return getFinalReader().GetExternalCXXCtorInitializers(Offset); +} bool ChainedIncludesSource::FindExternalVisibleDeclsByName(const DeclContext *DC, DeclarationName Name) { diff --git a/contrib/llvm/tools/clang/lib/Frontend/CompilerInstance.cpp b/contrib/llvm/tools/clang/lib/Frontend/CompilerInstance.cpp index 93a34b7..9e01727 100644 --- a/contrib/llvm/tools/clang/lib/Frontend/CompilerInstance.cpp +++ b/contrib/llvm/tools/clang/lib/Frontend/CompilerInstance.cpp @@ -329,14 +329,8 @@ void CompilerInstance::createPreprocessor(TranslationUnitKind TUKind) { PP->setPreprocessedOutput(getPreprocessorOutputOpts().ShowCPP); - // Set up the module path, including the hash for the - // module-creation options. - SmallString<256> SpecificModuleCache( - getHeaderSearchOpts().ModuleCachePath); - if (!getHeaderSearchOpts().DisableModuleHash) - llvm::sys::path::append(SpecificModuleCache, - getInvocation().getModuleHash()); - PP->getHeaderSearchInfo().setModuleCachePath(SpecificModuleCache); + if (PP->getLangOpts().Modules) + PP->getHeaderSearchInfo().setModuleCachePath(getSpecificModuleCachePath()); // Handle generating dependencies, if requested. const DependencyOutputOptions &DepOpts = getDependencyOutputOpts(); @@ -371,14 +365,17 @@ void CompilerInstance::createPreprocessor(TranslationUnitKind TUKind) { AttachHeaderIncludeGen(*PP, /*ShowAllHeaders=*/false, /*OutputPath=*/"", /*ShowDepth=*/true, /*MSStyle=*/true); } +} - // Load all explictly-specified module map files. - for (const auto &Filename : getFrontendOpts().ModuleMapFiles) { - if (auto *File = getFileManager().getFile(Filename)) - PP->getHeaderSearchInfo().loadModuleMapFile(File, /*IsSystem*/false); - else - getDiagnostics().Report(diag::err_module_map_not_found) << Filename; - } +std::string CompilerInstance::getSpecificModuleCachePath() { + // Set up the module path, including the hash for the + // module-creation options. 
+ SmallString<256> SpecificModuleCache( + getHeaderSearchOpts().ModuleCachePath); + if (!getHeaderSearchOpts().DisableModuleHash) + llvm::sys::path::append(SpecificModuleCache, + getInvocation().getModuleHash()); + return SpecificModuleCache.str(); } // ASTContext @@ -396,32 +393,30 @@ void CompilerInstance::createASTContext() { void CompilerInstance::createPCHExternalASTSource( StringRef Path, bool DisablePCHValidation, bool AllowPCHWithCompilerErrors, void *DeserializationListener, bool OwnDeserializationListener) { - IntrusiveRefCntPtr<ExternalASTSource> Source; bool Preamble = getPreprocessorOpts().PrecompiledPreambleBytes.first != 0; - Source = createPCHExternalASTSource( + ModuleManager = createPCHExternalASTSource( Path, getHeaderSearchOpts().Sysroot, DisablePCHValidation, AllowPCHWithCompilerErrors, getPreprocessor(), getASTContext(), DeserializationListener, OwnDeserializationListener, Preamble, getFrontendOpts().UseGlobalModuleIndex); - ModuleManager = static_cast<ASTReader*>(Source.get()); - getASTContext().setExternalSource(Source); } -ExternalASTSource *CompilerInstance::createPCHExternalASTSource( +IntrusiveRefCntPtr<ASTReader> CompilerInstance::createPCHExternalASTSource( StringRef Path, const std::string &Sysroot, bool DisablePCHValidation, bool AllowPCHWithCompilerErrors, Preprocessor &PP, ASTContext &Context, void *DeserializationListener, bool OwnDeserializationListener, bool Preamble, bool UseGlobalModuleIndex) { HeaderSearchOptions &HSOpts = PP.getHeaderSearchInfo().getHeaderSearchOpts(); - std::unique_ptr<ASTReader> Reader; - Reader.reset(new ASTReader(PP, Context, - Sysroot.empty() ? "" : Sysroot.c_str(), - DisablePCHValidation, - AllowPCHWithCompilerErrors, - /*AllowConfigurationMismatch*/false, - HSOpts.ModulesValidateSystemHeaders, - UseGlobalModuleIndex)); + IntrusiveRefCntPtr<ASTReader> Reader( + new ASTReader(PP, Context, Sysroot.empty() ? "" : Sysroot.c_str(), + DisablePCHValidation, AllowPCHWithCompilerErrors, + /*AllowConfigurationMismatch*/ false, + HSOpts.ModulesValidateSystemHeaders, UseGlobalModuleIndex)); + + // We need the external source to be set up before we read the AST, because + // eagerly-deserialized declarations may use it. + Context.setExternalSource(Reader.get()); Reader->setDeserializationListener( static_cast<ASTDeserializationListener *>(DeserializationListener), @@ -435,7 +430,7 @@ ExternalASTSource *CompilerInstance::createPCHExternalASTSource( // Set the predefines buffer as suggested by the PCH reader. Typically, the // predefines buffer will be empty. PP.setPredefines(Reader->getSuggestedPredefines()); - return Reader.release(); + return Reader; case ASTReader::Failure: // Unrecoverable failure: don't even try to process the input file. 
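An aside on getSpecificModuleCachePath above: mixing the invocation hash into the cache directory keeps incompatible build options from ever sharing cached modules. A sketch of the same path construction (hash value hypothetical):

#include <iostream>
#include <string>

// Append the module-creation-options hash to the cache directory unless
// hashing is disabled, mirroring getSpecificModuleCachePath above.
static std::string specificModuleCachePath(std::string CachePath,
                                           bool DisableModuleHash,
                                           const std::string &InvocationHash) {
  if (!DisableModuleHash)
    CachePath += "/" + InvocationHash; // llvm::sys::path::append upstream
  return CachePath;
}

int main() {
  // Distinct option sets hash differently, so they get distinct caches.
  std::cout << specificModuleCachePath("/tmp/modcache", false, "2A6X1R0Q9PYD")
            << "\n"; // /tmp/modcache/2A6X1R0Q9PYD
  return 0;
}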
@@ -450,6 +445,7 @@ ExternalASTSource *CompilerInstance::createPCHExternalASTSource( break; } + Context.setExternalSource(nullptr); return nullptr; } @@ -522,42 +518,43 @@ void CompilerInstance::createSema(TranslationUnitKind TUKind, // Output Files -void CompilerInstance::addOutputFile(const OutputFile &OutFile) { +void CompilerInstance::addOutputFile(OutputFile &&OutFile) { assert(OutFile.OS && "Attempt to add empty stream to output list!"); - OutputFiles.push_back(OutFile); + OutputFiles.push_back(std::move(OutFile)); } void CompilerInstance::clearOutputFiles(bool EraseFiles) { - for (std::list<OutputFile>::iterator - it = OutputFiles.begin(), ie = OutputFiles.end(); it != ie; ++it) { - delete it->OS; - if (!it->TempFilename.empty()) { + for (OutputFile &OF : OutputFiles) { + // Manually close the stream before we rename it. + OF.OS.reset(); + + if (!OF.TempFilename.empty()) { if (EraseFiles) { - llvm::sys::fs::remove(it->TempFilename); + llvm::sys::fs::remove(OF.TempFilename); } else { - SmallString<128> NewOutFile(it->Filename); + SmallString<128> NewOutFile(OF.Filename); // If '-working-directory' was passed, the output filename should be // relative to that. FileMgr->FixupRelativePath(NewOutFile); if (std::error_code ec = - llvm::sys::fs::rename(it->TempFilename, NewOutFile.str())) { + llvm::sys::fs::rename(OF.TempFilename, NewOutFile)) { getDiagnostics().Report(diag::err_unable_to_rename_temp) - << it->TempFilename << it->Filename << ec.message(); + << OF.TempFilename << OF.Filename << ec.message(); - llvm::sys::fs::remove(it->TempFilename); + llvm::sys::fs::remove(OF.TempFilename); } } - } else if (!it->Filename.empty() && EraseFiles) - llvm::sys::fs::remove(it->Filename); + } else if (!OF.Filename.empty() && EraseFiles) + llvm::sys::fs::remove(OF.Filename); } OutputFiles.clear(); + NonSeekStream.reset(); } -llvm::raw_fd_ostream * -CompilerInstance::createDefaultOutputFile(bool Binary, - StringRef InFile, +raw_pwrite_stream * +CompilerInstance::createDefaultOutputFile(bool Binary, StringRef InFile, StringRef Extension) { return createOutputFile(getFrontendOpts().OutputFile, Binary, /*RemoveFileOnSignal=*/true, InFile, Extension, @@ -565,21 +562,20 @@ CompilerInstance::createDefaultOutputFile(bool Binary, } llvm::raw_null_ostream *CompilerInstance::createNullOutputFile() { - llvm::raw_null_ostream *OS = new llvm::raw_null_ostream(); - addOutputFile(OutputFile("", "", OS)); - return OS; + auto OS = llvm::make_unique<llvm::raw_null_ostream>(); + llvm::raw_null_ostream *Ret = OS.get(); + addOutputFile(OutputFile("", "", std::move(OS))); + return Ret; } -llvm::raw_fd_ostream * -CompilerInstance::createOutputFile(StringRef OutputPath, - bool Binary, bool RemoveFileOnSignal, - StringRef InFile, - StringRef Extension, - bool UseTemporary, +raw_pwrite_stream * +CompilerInstance::createOutputFile(StringRef OutputPath, bool Binary, + bool RemoveFileOnSignal, StringRef InFile, + StringRef Extension, bool UseTemporary, bool CreateMissingDirectories) { std::string OutputPathName, TempPathName; std::error_code EC; - llvm::raw_fd_ostream *OS = createOutputFile( + std::unique_ptr<raw_pwrite_stream> OS = createOutputFile( OutputPath, EC, Binary, RemoveFileOnSignal, InFile, Extension, UseTemporary, CreateMissingDirectories, &OutputPathName, &TempPathName); if (!OS) { @@ -588,15 +584,16 @@ CompilerInstance::createOutputFile(StringRef OutputPath, return nullptr; } + raw_pwrite_stream *Ret = OS.get(); // Add the output file -- but don't try to remove "-", since this means we are // using stdin. 
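An aside on the temp-file handling in these hunks: createOutputFile and ASTUnit::Save both use the atomic-write idiom of writing to a unique sibling temp file and renaming it over the destination, so readers never observe a half-written file. A standard-library-only sketch (fixed temp suffix standing in for createUniqueFile's random bits, error handling trimmed):

#include <cstdio>
#include <fstream>
#include <string>

// Write Contents to Path via a temp file plus rename, so a crash mid-write
// can never leave a truncated Path behind.
static bool atomicWrite(const std::string &Path, const std::string &Contents) {
  std::string Temp = Path + "-tmp"; // createUniqueFile adds random %%%%%%%%
  {
    std::ofstream OS(Temp, std::ios::binary);
    if (!(OS << Contents))
      return false;
  } // close the stream before renaming, as clearOutputFiles does above
  if (std::rename(Temp.c_str(), Path.c_str()) != 0) {
    std::remove(Temp.c_str()); // rename failed: drop the temp file
    return false;
  }
  return true;
}

int main() { return atomicWrite("out.txt", "hello\n") ? 0 : 1; }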
addOutputFile(OutputFile((OutputPathName != "-") ? OutputPathName : "", - TempPathName, OS)); + TempPathName, std::move(OS))); - return OS; + return Ret; } -llvm::raw_fd_ostream *CompilerInstance::createOutputFile( +std::unique_ptr<llvm::raw_pwrite_stream> CompilerInstance::createOutputFile( StringRef OutputPath, std::error_code &Error, bool Binary, bool RemoveFileOnSignal, StringRef InFile, StringRef Extension, bool UseTemporary, bool CreateMissingDirectories, @@ -646,14 +643,14 @@ llvm::raw_fd_ostream *CompilerInstance::createOutputFile( TempPath += "-%%%%%%%%"; int fd; std::error_code EC = - llvm::sys::fs::createUniqueFile(TempPath.str(), fd, TempPath); + llvm::sys::fs::createUniqueFile(TempPath, fd, TempPath); if (CreateMissingDirectories && EC == llvm::errc::no_such_file_or_directory) { StringRef Parent = llvm::sys::path::parent_path(OutputPath); EC = llvm::sys::fs::create_directories(Parent); if (!EC) { - EC = llvm::sys::fs::createUniqueFile(TempPath.str(), fd, TempPath); + EC = llvm::sys::fs::createUniqueFile(TempPath, fd, TempPath); } } @@ -684,7 +681,13 @@ llvm::raw_fd_ostream *CompilerInstance::createOutputFile( if (TempPathName) *TempPathName = TempFile; - return OS.release(); + if (!Binary || OS->supportsSeeking()) + return std::move(OS); + + auto B = llvm::make_unique<llvm::buffer_ostream>(*OS); + assert(!NonSeekStream); + NonSeekStream = std::move(OS); + return std::move(B); } // Initialization Utilities @@ -946,16 +949,18 @@ static bool compileModuleImpl(CompilerInstance &ImportingInstance, FrontendOpts.Inputs.push_back( FrontendInputFile(ModuleMapFile->getName(), IK)); } else { + SmallString<128> FakeModuleMapFile(Module->Directory->getName()); + llvm::sys::path::append(FakeModuleMapFile, "__inferred_module.map"); + FrontendOpts.Inputs.push_back(FrontendInputFile(FakeModuleMapFile, IK)); + llvm::raw_string_ostream OS(InferredModuleMapContent); Module->print(OS); OS.flush(); - FrontendOpts.Inputs.push_back( - FrontendInputFile("__inferred_module.map", IK)); std::unique_ptr<llvm::MemoryBuffer> ModuleMapBuffer = llvm::MemoryBuffer::getMemBuffer(InferredModuleMapContent); ModuleMapFile = Instance.getFileManager().getVirtualFile( - "__inferred_module.map", InferredModuleMapContent.size(), 0); + FakeModuleMapFile, InferredModuleMapContent.size(), 0); SourceMgr.overrideFileContents(ModuleMapFile, std::move(ModuleMapBuffer)); } @@ -1031,9 +1036,19 @@ static bool compileAndLoadModule(CompilerInstance &ImportingInstance, case llvm::LockFileManager::LFS_Shared: // Someone else is responsible for building the module. Wait for them to // finish. - if (Locked.waitForUnlock() == llvm::LockFileManager::Res_OwnerDied) + switch (Locked.waitForUnlock()) { + case llvm::LockFileManager::Res_Success: + ModuleLoadCapabilities |= ASTReader::ARR_OutOfDate; + break; + case llvm::LockFileManager::Res_OwnerDied: continue; // try again to get the lock. - ModuleLoadCapabilities |= ASTReader::ARR_OutOfDate; + case llvm::LockFileManager::Res_Timeout: + Diags.Report(ModuleNameLoc, diag::err_module_lock_timeout) + << Module->Name; + // Clear the lock file so that future invocations can make progress. + Locked.unsafeRemoveLockFile(); + return false; + } break; } @@ -1071,79 +1086,51 @@ static void checkConfigMacro(Preprocessor &PP, StringRef ConfigMacro, // not have changed. if (!Id->hadMacroDefinition()) return; + auto *LatestLocalMD = PP.getLocalMacroDirectiveHistory(Id); - // If this identifier does not currently have a macro definition, - // check whether it had one on the command line.
- if (!Id->hasMacroDefinition()) { - MacroDirective::DefInfo LatestDef = - PP.getMacroDirectiveHistory(Id)->getDefinition(); - for (MacroDirective::DefInfo Def = LatestDef; Def; - Def = Def.getPreviousDefinition()) { - FileID FID = SourceMgr.getFileID(Def.getLocation()); - if (FID.isInvalid()) - continue; - - // We only care about the predefines buffer. - if (FID != PP.getPredefinesFileID()) - continue; - - // This macro was defined on the command line, then #undef'd later. - // Complain. - PP.Diag(ImportLoc, diag::warn_module_config_macro_undef) - << true << ConfigMacro << Mod->getFullModuleName(); - if (LatestDef.isUndefined()) - PP.Diag(LatestDef.getUndefLocation(), diag::note_module_def_undef_here) - << true; - return; - } - - // Okay: no definition in the predefines buffer. - return; - } - - // This identifier has a macro definition. Check whether we had a definition - // on the command line. - MacroDirective::DefInfo LatestDef = - PP.getMacroDirectiveHistory(Id)->getDefinition(); - MacroDirective::DefInfo PredefinedDef; - for (MacroDirective::DefInfo Def = LatestDef; Def; - Def = Def.getPreviousDefinition()) { - FileID FID = SourceMgr.getFileID(Def.getLocation()); - if (FID.isInvalid()) - continue; - + // Find the macro definition from the command line. + MacroInfo *CmdLineDefinition = nullptr; + for (auto *MD = LatestLocalMD; MD; MD = MD->getPrevious()) { // We only care about the predefines buffer. - if (FID != PP.getPredefinesFileID()) + FileID FID = SourceMgr.getFileID(MD->getLocation()); + if (FID.isInvalid() || FID != PP.getPredefinesFileID()) continue; - - PredefinedDef = Def; + if (auto *DMD = dyn_cast<DefMacroDirective>(MD)) + CmdLineDefinition = DMD->getMacroInfo(); break; } - // If there was no definition for this macro in the predefines buffer, - // complain. - if (!PredefinedDef || - (!PredefinedDef.getLocation().isValid() && - PredefinedDef.getUndefLocation().isValid())) { + auto *CurrentDefinition = PP.getMacroInfo(Id); + if (CurrentDefinition == CmdLineDefinition) { + // Macro matches. Nothing to do. + } else if (!CurrentDefinition) { + // This macro was defined on the command line, then #undef'd later. + // Complain. + PP.Diag(ImportLoc, diag::warn_module_config_macro_undef) + << true << ConfigMacro << Mod->getFullModuleName(); + auto LatestDef = LatestLocalMD->getDefinition(); + assert(LatestDef.isUndefined() && + "predefined macro went away with no #undef?"); + PP.Diag(LatestDef.getUndefLocation(), diag::note_module_def_undef_here) + << true; + return; + } else if (!CmdLineDefinition) { + // There was no definition for this macro in the predefines buffer, + // but there was a local definition. Complain. PP.Diag(ImportLoc, diag::warn_module_config_macro_undef) << false << ConfigMacro << Mod->getFullModuleName(); - PP.Diag(LatestDef.getLocation(), diag::note_module_def_undef_here) + PP.Diag(CurrentDefinition->getDefinitionLoc(), + diag::note_module_def_undef_here) + << false; + } else if (!CurrentDefinition->isIdenticalTo(*CmdLineDefinition, PP, + /*Syntactically=*/true)) { + // The macro definitions differ. + PP.Diag(ImportLoc, diag::warn_module_config_macro_undef) + << false << ConfigMacro << Mod->getFullModuleName(); + PP.Diag(CurrentDefinition->getDefinitionLoc(), + diag::note_module_def_undef_here) << false; - return; } - - // If the current macro definition is the same as the predefined macro - // definition, it's okay. 
- if (LatestDef.getMacroInfo() == PredefinedDef.getMacroInfo() || - LatestDef.getMacroInfo()->isIdenticalTo(*PredefinedDef.getMacroInfo(),PP, - /*Syntactically=*/true)) - return; - - // The macro definitions differ. - PP.Diag(ImportLoc, diag::warn_module_config_macro_undef) - << false << ConfigMacro << Mod->getFullModuleName(); - PP.Diag(LatestDef.getLocation(), diag::note_module_def_undef_here) - << false; } /// \brief Write a new timestamp file with the given path. @@ -1186,8 +1173,7 @@ static void pruneModuleCache(const HeaderSearchOptions &HSOpts) { std::error_code EC; SmallString<128> ModuleCachePathNative; llvm::sys::path::native(HSOpts.ModuleCachePath, ModuleCachePathNative); - for (llvm::sys::fs::directory_iterator - Dir(ModuleCachePathNative.str(), EC), DirEnd; + for (llvm::sys::fs::directory_iterator Dir(ModuleCachePathNative, EC), DirEnd; Dir != DirEnd && !EC; Dir.increment(EC)) { // If we don't have a directory, there's nothing to look into. if (!llvm::sys::fs::is_directory(Dir->path())) @@ -1235,9 +1221,10 @@ void CompilerInstance::createModuleManager() { if (!hasASTContext()) createASTContext(); - // If we're not recursively building a module, check whether we - // need to prune the module cache. - if (getSourceManager().getModuleBuildStack().empty() && + // If we're implicitly building modules but not currently recursively + // building a module, check whether we need to prune the module cache. + if (getLangOpts().ImplicitModules && + getSourceManager().getModuleBuildStack().empty() && getHeaderSearchOpts().ModuleCachePruneInterval > 0 && getHeaderSearchOpts().ModuleCachePruneAfter > 0) { pruneModuleCache(getHeaderSearchOpts()); @@ -1274,6 +1261,7 @@ bool CompilerInstance::loadModuleFile(StringRef FileName) { struct ReadModuleNames : ASTReaderListener { CompilerInstance &CI; std::vector<StringRef> ModuleFileStack; + std::vector<StringRef> ModuleNameStack; bool Failed; bool TopFileIsModule; @@ -1283,21 +1271,36 @@ bool CompilerInstance::loadModuleFile(StringRef FileName) { bool needsImportVisitation() const override { return true; } void visitImport(StringRef FileName) override { + if (!CI.ExplicitlyLoadedModuleFiles.insert(FileName).second) { + if (ModuleFileStack.size() == 0) + TopFileIsModule = true; + return; + } + ModuleFileStack.push_back(FileName); + ModuleNameStack.push_back(StringRef()); if (ASTReader::readASTFileControlBlock(FileName, CI.getFileManager(), *this)) { - CI.getDiagnostics().Report(SourceLocation(), - diag::err_module_file_not_found) + CI.getDiagnostics().Report( + SourceLocation(), CI.getFileManager().getBufferForFile(FileName) + ? diag::err_module_file_invalid + : diag::err_module_file_not_found) << FileName; - // FIXME: Produce a note stack explaining how we got here. + for (int I = ModuleFileStack.size() - 2; I >= 0; --I) + CI.getDiagnostics().Report(SourceLocation(), + diag::note_module_file_imported_by) + << ModuleFileStack[I] + << !ModuleNameStack[I].empty() << ModuleNameStack[I]; Failed = true; } + ModuleNameStack.pop_back(); ModuleFileStack.pop_back(); } void ReadModuleName(StringRef ModuleName) override { if (ModuleFileStack.size() == 1) TopFileIsModule = true; + ModuleNameStack.back() = ModuleName; auto &ModuleFile = CI.ModuleFileOverrides[ModuleName]; if (!ModuleFile.empty() && @@ -1310,6 +1313,19 @@ bool CompilerInstance::loadModuleFile(StringRef FileName) { } } RMN(*this); + // If we don't already have an ASTReader, create one now. + if (!ModuleManager) + createModuleManager(); + + // Tell the module manager about this module file. 
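An aside on ReadModuleNames above: it keeps parallel stacks of module files and names precisely so a failure deep in the import graph can be reported along with its whole chain of importers. A compact sketch of that stack-walking note emission (toy graph, hypothetical file names):

#include <iostream>
#include <map>
#include <string>
#include <vector>

using Graph = std::map<std::string, std::vector<std::string>>;

// Depth-first visit of module imports; on a missing file, emit the error
// plus one note per file still on the stack, innermost importer first --
// the same walk the ModuleFileStack loop above performs.
static void visitImport(const Graph &G, const std::string &File,
                        std::vector<std::string> &Stack) {
  Stack.push_back(File);
  auto It = G.find(File);
  if (It == G.end()) {
    std::cerr << "error: module file '" << File << "' not found\n";
    for (int I = static_cast<int>(Stack.size()) - 2; I >= 0; --I)
      std::cerr << "note: imported by '" << Stack[I] << "'\n";
  } else {
    for (const std::string &Import : It->second)
      visitImport(G, Import, Stack);
  }
  Stack.pop_back();
}

int main() {
  Graph G = {{"a.pcm", {"b.pcm"}}, {"b.pcm", {"missing.pcm"}}};
  std::vector<std::string> Stack;
  visitImport(G, "a.pcm", Stack);
  return 0;
}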
+ if (getModuleManager()->getModuleManager().addKnownModuleFile(FileName)) { + getDiagnostics().Report(SourceLocation(), diag::err_module_file_not_found) + << FileName; + return false; + } + + // Build our mapping of module names to module files from this file + // and its imports. RMN.visitImport(FileName); if (RMN.Failed) @@ -1343,7 +1359,7 @@ CompilerInstance::loadModule(SourceLocation ImportLoc, if (LastModuleImportResult && ModuleName != getLangOpts().CurrentModule && ModuleName != getLangOpts().ImplementationOfModule) ModuleManager->makeModuleVisible(LastModuleImportResult, Visibility, - ImportLoc, /*Complain=*/false); + ImportLoc); return LastModuleImportResult; } @@ -1373,6 +1389,12 @@ CompilerInstance::loadModule(SourceLocation ImportLoc, auto Override = ModuleFileOverrides.find(ModuleName); bool Explicit = Override != ModuleFileOverrides.end(); + if (!Explicit && !getLangOpts().ImplicitModules) { + getDiagnostics().Report(ModuleNameLoc, diag::err_module_build_disabled) + << ModuleName; + ModuleBuildFailed = true; + return ModuleLoadResult(); + } std::string ModuleFileName = Explicit ? Override->second @@ -1580,8 +1602,7 @@ CompilerInstance::loadModule(SourceLocation ImportLoc, return ModuleLoadResult(); } - ModuleManager->makeModuleVisible(Module, Visibility, ImportLoc, - /*Complain=*/true); + ModuleManager->makeModuleVisible(Module, Visibility, ImportLoc); } // Check for any configuration macros that have changed. @@ -1591,25 +1612,6 @@ CompilerInstance::loadModule(SourceLocation ImportLoc, Module, ImportLoc); } - // Determine whether we're in the #include buffer for a module. The #includes - // in that buffer do not qualify as module imports; they're just an - // implementation detail of us building the module. - bool IsInModuleIncludes = !getLangOpts().CurrentModule.empty() && - getSourceManager().getFileID(ImportLoc) == - getSourceManager().getMainFileID(); - - // If this module import was due to an inclusion directive, create an - // implicit import declaration to capture it in the AST. 
- if (IsInclusionDirective && hasASTContext() && !IsInModuleIncludes) { - TranslationUnitDecl *TU = getASTContext().getTranslationUnitDecl(); - ImportDecl *ImportD = ImportDecl::CreateImplicit(getASTContext(), TU, - ImportLoc, Module, - Path.back().second); - TU->addDecl(ImportD); - if (Consumer) - Consumer->HandleImplicitImportDecl(ImportD); - } - LastModuleImportLoc = ImportLoc; LastModuleImportResult = ModuleLoadResult(Module, false); return LastModuleImportResult; @@ -1617,9 +1619,13 @@ CompilerInstance::loadModule(SourceLocation ImportLoc, void CompilerInstance::makeModuleVisible(Module *Mod, Module::NameVisibilityKind Visibility, - SourceLocation ImportLoc, - bool Complain){ - ModuleManager->makeModuleVisible(Mod, Visibility, ImportLoc, Complain); + SourceLocation ImportLoc) { + if (!ModuleManager) + createModuleManager(); + if (!ModuleManager) + return; + + ModuleManager->makeModuleVisible(Mod, Visibility, ImportLoc); } GlobalModuleIndex *CompilerInstance::loadGlobalModuleIndex( diff --git a/contrib/llvm/tools/clang/lib/Frontend/CompilerInvocation.cpp b/contrib/llvm/tools/clang/lib/Frontend/CompilerInvocation.cpp index 54025b0..8d3d312 100644 --- a/contrib/llvm/tools/clang/lib/Frontend/CompilerInvocation.cpp +++ b/contrib/llvm/tools/clang/lib/Frontend/CompilerInvocation.cpp @@ -253,7 +253,7 @@ static bool ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args, for (unsigned i = 0, e = checkers.size(); i != e; ++i) Opts.CheckersControlList.push_back(std::make_pair(checkers[i], enable)); } - + // Go through the analyzer configuration options. for (arg_iterator it = Args.filtered_begin(OPT_analyzer_config), ie = Args.filtered_end(); it != ie; ++it) { @@ -329,11 +329,8 @@ static void parseSanitizerKinds(StringRef FlagName, const std::vector<std::string> &Sanitizers, DiagnosticsEngine &Diags, SanitizerSet &S) { for (const auto &Sanitizer : Sanitizers) { - SanitizerKind K = llvm::StringSwitch<SanitizerKind>(Sanitizer) -#define SANITIZER(NAME, ID) .Case(NAME, SanitizerKind::ID) -#include "clang/Basic/Sanitizers.def" - .Default(SanitizerKind::Unknown); - if (K == SanitizerKind::Unknown) + SanitizerMask K = parseSanitizerValue(Sanitizer, /*AllowGroups=*/false); + if (K == 0) Diags.Report(diag::err_drv_invalid_value) << FlagName << Sanitizer; else S.set(K, true); @@ -367,6 +364,16 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK, Opts.setInlining(Args.hasArg(OPT_fno_inline_functions) ? CodeGenOptions::OnlyAlwaysInlining : Opts.getInlining()); + if (Arg *A = Args.getLastArg(OPT_fveclib)) { + StringRef Name = A->getValue(); + if (Name == "Accelerate") + Opts.setVecLib(CodeGenOptions::Accelerate); + else if (Name == "none") + Opts.setVecLib(CodeGenOptions::NoLibrary); + else + Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args) << Name; + } + if (Args.hasArg(OPT_gline_tables_only)) { Opts.setDebugInfo(CodeGenOptions::DebugLineTablesOnly); } else if (Args.hasArg(OPT_g_Flag) || Args.hasArg(OPT_gdwarf_2) || @@ -395,6 +402,10 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK, // Default Dwarf version is 4 if we are generating debug information. 
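The -fveclib handling added in the hunk above follows a recurring pattern in ParseCodeGenArgs: take the last occurrence of an option, map its string value onto an enum, and report err_drv_invalid_value for anything unrecognized. A self-contained sketch of the same pattern (hypothetical argument handling, not the clang ArgList API):

#include <iostream>
#include <string>
#include <vector>

enum class VecLib { NoLibrary, Accelerate };

VecLib parseVecLib(const std::vector<std::string> &Args, bool &Ok) {
  VecLib Result = VecLib::NoLibrary;
  Ok = true;
  const std::string Prefix = "-fveclib=";
  for (const auto &A : Args) { // later occurrences override earlier ones
    if (A.compare(0, Prefix.size(), Prefix) != 0)
      continue;
    const std::string Name = A.substr(Prefix.size());
    if (Name == "Accelerate")
      Result = VecLib::Accelerate;
    else if (Name == "none")
      Result = VecLib::NoLibrary;
    else { // mirrors Diags.Report(diag::err_drv_invalid_value)
      std::cerr << "error: invalid value '" << Name << "' in '" << A << "'\n";
      Ok = false;
    }
  }
  return Result;
}

With {"-fveclib=none", "-fveclib=Accelerate"} this returns Accelerate, matching getLastArg's last-one-wins semantics.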
Opts.DwarfVersion = 4; + if (const Arg *A = + Args.getLastArg(OPT_emit_llvm_uselists, OPT_no_emit_llvm_uselists)) + Opts.EmitLLVMUseLists = A->getOption().getID() == OPT_emit_llvm_uselists; + Opts.DisableLLVMOpts = Args.hasArg(OPT_disable_llvm_optzns); Opts.DisableRedZone = Args.hasArg(OPT_disable_red_zone); Opts.ForbidGuardVariables = Args.hasArg(OPT_fforbid_guard_variables); @@ -417,13 +428,14 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK, Opts.DisableIntegratedAS = Args.hasArg(OPT_fno_integrated_as); Opts.Autolink = !Args.hasArg(OPT_fno_autolink); Opts.SampleProfileFile = Args.getLastArgValue(OPT_fprofile_sample_use_EQ); - Opts.ProfileInstrGenerate = Args.hasArg(OPT_fprofile_instr_generate); + Opts.ProfileInstrGenerate = Args.hasArg(OPT_fprofile_instr_generate) || + Args.hasArg(OPT_fprofile_instr_generate_EQ); + Opts.InstrProfileOutput = Args.getLastArgValue(OPT_fprofile_instr_generate_EQ); Opts.InstrProfileInput = Args.getLastArgValue(OPT_fprofile_instr_use_EQ); Opts.CoverageMapping = Args.hasArg(OPT_fcoverage_mapping); Opts.DumpCoverageMapping = Args.hasArg(OPT_dump_coverage_mapping); Opts.AsmVerbose = Args.hasArg(OPT_masm_verbose); Opts.ObjCAutoRefCountExceptions = Args.hasArg(OPT_fobjc_arc_exceptions); - Opts.CUDAIsDevice = Args.hasArg(OPT_fcuda_is_device); Opts.CXAAtExit = !Args.hasArg(OPT_fno_use_cxa_atexit); Opts.CXXCtorDtorAliases = Args.hasArg(OPT_mconstructor_aliases); Opts.CodeModel = getCodeModel(Args, Diags); @@ -441,11 +453,11 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK, Args.hasArg(OPT_cl_unsafe_math_optimizations) || Args.hasArg(OPT_cl_finite_math_only) || Args.hasArg(OPT_cl_fast_relaxed_math)); - Opts.NoSignedZeros = Args.hasArg(OPT_cl_no_signed_zeros); + Opts.NoSignedZeros = Args.hasArg(OPT_fno_signed_zeros); + Opts.ReciprocalMath = Args.hasArg(OPT_freciprocal_math); Opts.NoZeroInitializedInBSS = Args.hasArg(OPT_mno_zero_initialized_in_bss); Opts.BackendOptions = Args.getAllArgValues(OPT_backend_option); Opts.NumRegisterParameters = getLastArgIntValue(Args, OPT_mregparm, 0, Diags); - Opts.NoGlobalMerge = Args.hasArg(OPT_mno_global_merge); Opts.NoExecStack = Args.hasArg(OPT_mno_exec_stack); Opts.FatalWarnings = Args.hasArg(OPT_massembler_fatal_warnings); Opts.EnableSegmentedStacks = Args.hasArg(OPT_split_stacks); @@ -472,8 +484,13 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK, OPT_fno_function_sections, false); Opts.DataSections = Args.hasFlag(OPT_fdata_sections, OPT_fno_data_sections, false); + Opts.UniqueSectionNames = Args.hasFlag(OPT_funique_section_names, + OPT_fno_unique_section_names, true); + Opts.MergeFunctions = Args.hasArg(OPT_fmerge_functions); + Opts.MSVolatile = Args.hasArg(OPT_fms_volatile); + Opts.VectorizeBB = Args.hasArg(OPT_vectorize_slp_aggressive); Opts.VectorizeLoop = Args.hasArg(OPT_vectorize_loops); Opts.VectorizeSLP = Args.hasArg(OPT_vectorize_slp); @@ -489,6 +506,8 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK, Opts.CoverageExtraChecksum = Args.hasArg(OPT_coverage_cfg_checksum); Opts.CoverageNoFunctionNamesInData = Args.hasArg(OPT_coverage_no_function_names_in_data); + Opts.CoverageExitBlockBeforeBody = + Args.hasArg(OPT_coverage_exit_block_before_body); if (Args.hasArg(OPT_coverage_version_EQ)) { StringRef CoverageVersion = Args.getLastArgValue(OPT_coverage_version_EQ); if (CoverageVersion.size() != 4) { @@ -507,8 +526,14 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind
IK, Opts.CompressDebugSections = Args.hasArg(OPT_compress_debug_sections); Opts.DebugCompilationDir = Args.getLastArgValue(OPT_fdebug_compilation_dir); Opts.LinkBitcodeFile = Args.getLastArgValue(OPT_mlink_bitcode_file); - Opts.SanitizeCoverage = - getLastArgIntValue(Args, OPT_fsanitize_coverage, 0, Diags); + Opts.SanitizeCoverageType = + getLastArgIntValue(Args, OPT_fsanitize_coverage_type, 0, Diags); + Opts.SanitizeCoverageIndirectCalls = + Args.hasArg(OPT_fsanitize_coverage_indirect_calls); + Opts.SanitizeCoverageTraceBB = Args.hasArg(OPT_fsanitize_coverage_trace_bb); + Opts.SanitizeCoverageTraceCmp = Args.hasArg(OPT_fsanitize_coverage_trace_cmp); + Opts.SanitizeCoverage8bitCounters = + Args.hasArg(OPT_fsanitize_coverage_8bit_counters); Opts.SanitizeMemoryTrackOrigins = getLastArgIntValue(Args, OPT_fsanitize_memory_track_origins_EQ, 0, Diags); Opts.SanitizeUndefinedTrapOnError = @@ -523,6 +548,13 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK, Opts.StackAlignment = StackAlignment; } + if (Arg *A = Args.getLastArg(OPT_mstack_probe_size)) { + StringRef Val = A->getValue(); + unsigned StackProbeSize = Opts.StackProbeSize; + Val.getAsInteger(0, StackProbeSize); + Opts.StackProbeSize = StackProbeSize; + } + if (Arg *A = Args.getLastArg(OPT_fobjc_dispatch_method_EQ)) { StringRef Name = A->getValue(); unsigned Method = llvm::StringSwitch<unsigned>(Name) @@ -616,6 +648,9 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK, Args.getAllArgValues(OPT_fsanitize_recover_EQ), Diags, Opts.SanitizeRecover); + Opts.CudaGpuBinaryFileNames = + Args.getAllArgValues(OPT_fcuda_include_gpubinary); + return Success; } @@ -634,6 +669,8 @@ static void ParseDependencyOutputArgs(DependencyOutputOptions &Opts, Opts.DOTOutputFile = Args.getLastArgValue(OPT_dependency_dot); Opts.ModuleDependencyOutputDir = Args.getLastArgValue(OPT_module_dependency_dir); + if (Args.hasArg(OPT_MV)) + Opts.OutputFormat = DependencyOutputFormat::NMake; } bool clang::ParseDiagnosticArgs(DiagnosticOptions &Opts, ArgList &Args, @@ -702,9 +739,9 @@ bool clang::ParseDiagnosticArgs(DiagnosticOptions &Opts, ArgList &Args, if (Format == "clang") Opts.setFormat(DiagnosticOptions::Clang); else if (Format == "msvc") - Opts.setFormat(DiagnosticOptions::Msvc); + Opts.setFormat(DiagnosticOptions::MSVC); else if (Format == "msvc-fallback") { - Opts.setFormat(DiagnosticOptions::Msvc); + Opts.setFormat(DiagnosticOptions::MSVC); Opts.CLFallbackMode = true; } else if (Format == "vi") Opts.setFormat(DiagnosticOptions::Vi); @@ -966,6 +1003,7 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args, .Case("cpp-output", IK_PreprocessedC) .Case("assembler-with-cpp", IK_Asm) .Case("c++-cpp-output", IK_PreprocessedCXX) + .Case("cuda-cpp-output", IK_PreprocessedCuda) .Case("objective-c-cpp-output", IK_PreprocessedObjC) .Case("objc-cpp-output", IK_PreprocessedObjC) .Case("objective-c++-cpp-output", IK_PreprocessedObjCXX) @@ -1169,6 +1207,7 @@ void CompilerInvocation::setLangDefaults(LangOptions &Opts, InputKind IK, LangStd = LangStandard::lang_opencl; break; case IK_CUDA: + case IK_PreprocessedCuda: LangStd = LangStandard::lang_cuda; break; case IK_Asm: @@ -1197,7 +1236,7 @@ void CompilerInvocation::setLangDefaults(LangOptions &Opts, InputKind IK, Opts.CPlusPlus1z = Std.isCPlusPlus1z(); Opts.Digraphs = Std.hasDigraphs(); Opts.GNUMode = Std.isGNUMode(); - Opts.GNUInline = !Std.isC99(); + Opts.GNUInline = Std.isC89(); Opts.HexFloats = Std.hasHexFloats(); Opts.ImplicitInt = 
Std.hasImplicitInt(); @@ -1221,7 +1260,8 @@ void CompilerInvocation::setLangDefaults(LangOptions &Opts, InputKind IK, Opts.NativeHalfType = 1; } - Opts.CUDA = LangStd == LangStandard::lang_cuda || IK == IK_CUDA; + Opts.CUDA = IK == IK_CUDA || IK == IK_PreprocessedCuda || + LangStd == LangStandard::lang_cuda; // OpenCL and C++ both have bool, true, false keywords. Opts.Bool = Opts.OpenCL || Opts.CPlusPlus; @@ -1236,9 +1276,6 @@ void CompilerInvocation::setLangDefaults(LangOptions &Opts, InputKind IK, Opts.CXXOperatorNames = Opts.CPlusPlus; Opts.DollarIdents = !Opts.AsmPreprocessor; - - // C++14 onwards has sized global deallocation functions. - Opts.SizedDeallocation = Opts.CPlusPlus14; } /// Attempt to parse a visibility value out of the given argument. @@ -1259,43 +1296,6 @@ static Visibility parseVisibility(Arg *arg, ArgList &args, return DefaultVisibility; } -static unsigned parseMSCVersion(ArgList &Args, DiagnosticsEngine &Diags) { - auto Arg = Args.getLastArg(OPT_fms_compatibility_version); - if (!Arg) - return 0; - - // The MSC versioning scheme involves four versioning components: - // - Major - // - Minor - // - Build - // - Patch - // - // We accept either the old style (_MSC_VER) value, or a _MSC_FULL_VER value. - // Additionally, the value may be provided in the form of a more readable - // MM.mm.bbbbb.pp version. - // - // Unfortunately, due to the bit-width limitations, we cannot currently encode - // the value for the patch level. - - unsigned VC[4] = {0}; - StringRef Value = Arg->getValue(); - SmallVector<StringRef, 4> Components; - - Value.split(Components, ".", llvm::array_lengthof(VC)); - for (unsigned CI = 0, - CE = std::min(Components.size(), llvm::array_lengthof(VC)); - CI < CE; ++CI) { - if (Components[CI].getAsInteger(10, VC[CI])) { - Diags.Report(diag::err_drv_invalid_value) - << Arg->getAsString(Args) << Value; - return 0; - } - } - - // FIXME we cannot encode the patch level - return VC[0] * 10000000 + VC[1] * 100000 + VC[2]; -} - static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK, DiagnosticsEngine &Diags) { // FIXME: Cleanup per-file based stuff. 
@@ -1336,6 +1336,7 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK, << A->getAsString(Args) << "OpenCL"; break; case IK_CUDA: + case IK_PreprocessedCuda: if (!Std.isCPlusPlus()) Diags.Report(diag::err_drv_argument_not_allowed_with) << A->getAsString(Args) << "CUDA"; @@ -1381,6 +1382,12 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK, if (Args.hasArg(OPT_fcuda_is_device)) Opts.CUDAIsDevice = 1; + if (Args.hasArg(OPT_fcuda_allow_host_calls_from_host_device)) + Opts.CUDAAllowHostCallsFromHostDevice = 1; + + if (Args.hasArg(OPT_fcuda_disable_target_call_checks)) + Opts.CUDADisableTargetCallChecks = 1; + if (Opts.ObjC1) { if (Arg *arg = Args.getLastArg(OPT_fobjc_runtime_EQ)) { StringRef value = arg->getValue(); @@ -1412,8 +1419,13 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK, (Opts.ObjCRuntime.getKind() == ObjCRuntime::FragileMacOSX); } - if (Args.hasArg(OPT_fgnu89_inline)) - Opts.GNUInline = 1; + if (Args.hasArg(OPT_fgnu89_inline)) { + if (Opts.CPlusPlus) + Diags.Report(diag::err_drv_argument_not_allowed_with) << "-fgnu89-inline" + << "C++/ObjC++"; + else + Opts.GNUInline = 1; + } if (Args.hasArg(OPT_fapple_kext)) { if (!Opts.CPlusPlus) @@ -1462,7 +1474,16 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK, Opts.MSVCCompat = Args.hasArg(OPT_fms_compatibility); Opts.MicrosoftExt = Opts.MSVCCompat || Args.hasArg(OPT_fms_extensions); Opts.AsmBlocks = Args.hasArg(OPT_fasm_blocks) || Opts.MicrosoftExt; - Opts.MSCompatibilityVersion = parseMSCVersion(Args, Diags); + Opts.MSCompatibilityVersion = 0; + if (const Arg *A = Args.getLastArg(OPT_fms_compatibility_version)) { + VersionTuple VT; + if (VT.tryParse(A->getValue())) + Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args) + << A->getValue(); + Opts.MSCompatibilityVersion = VT.getMajor() * 10000000 + + VT.getMinor().getValueOr(0) * 100000 + + VT.getSubminor().getValueOr(0); + } // Mimicking gcc's behavior, trigraphs are only enabled if -trigraphs // is specified, or -std is set to a conforming mode.
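Both the removed parseMSCVersion and its VersionTuple-based replacement above pack a major.minor.build version into one integer as major*10000000 + minor*100000 + build; as the removed comment notes, the patch component does not fit in the available bits. A worked check (illustrative values; 180031101 is the _MSC_FULL_VER reported by Visual Studio 2013 Update 4):

constexpr unsigned encodeMSC(unsigned Major, unsigned Minor, unsigned Build) {
  return Major * 10000000 + Minor * 100000 + Build;
}
static_assert(encodeMSC(18, 0, 31101) == 180031101,
              "-fms-compatibility-version=18.00.31101");
static_assert(encodeMSC(19, 0, 0) == 190000000,
              "-fms-compatibility-version=19");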
@@ -1498,12 +1519,15 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK, Opts.ModulesStrictDeclUse = Args.hasArg(OPT_fmodules_strict_decluse); Opts.ModulesDeclUse = Args.hasArg(OPT_fmodules_decluse) || Opts.ModulesStrictDeclUse; + Opts.ModulesLocalVisibility = + Args.hasArg(OPT_fmodules_local_submodule_visibility); Opts.ModulesSearchAll = Opts.Modules && !Args.hasArg(OPT_fno_modules_search_all) && Args.hasArg(OPT_fmodules_search_all); Opts.ModulesErrorRecovery = !Args.hasArg(OPT_fno_modules_error_recovery); Opts.ModulesImplicitMaps = Args.hasFlag(OPT_fmodules_implicit_maps, OPT_fno_modules_implicit_maps, true); + Opts.ImplicitModules = !Args.hasArg(OPT_fno_implicit_modules); Opts.CharIsSigned = Opts.OpenCL || !Args.hasArg(OPT_fno_signed_char); Opts.WChar = Opts.CPlusPlus && !Args.hasArg(OPT_fno_wchar); Opts.ShortWChar = Args.hasFlag(OPT_fshort_wchar, OPT_fno_short_wchar, false); @@ -1512,7 +1536,8 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK, Opts.NoBuiltin = Args.hasArg(OPT_fno_builtin) || Opts.Freestanding; Opts.NoMathBuiltin = Args.hasArg(OPT_fno_math_builtin); Opts.AssumeSaneOperatorNew = !Args.hasArg(OPT_fno_assume_sane_operator_new); - Opts.SizedDeallocation |= Args.hasArg(OPT_fsized_deallocation); + Opts.SizedDeallocation = Args.hasArg(OPT_fsized_deallocation); + Opts.ConceptsTS = Args.hasArg(OPT_fconcepts_ts); Opts.HeinousExtensions = Args.hasArg(OPT_fheinous_gnu_extensions); Opts.AccessControl = !Args.hasArg(OPT_fno_access_control); Opts.ElideConstructors = !Args.hasArg(OPT_fno_elide_constructors); @@ -1559,10 +1584,13 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK, Opts.DebuggerObjCLiteral = Args.hasArg(OPT_fdebugger_objc_literal); Opts.ApplePragmaPack = Args.hasArg(OPT_fapple_pragma_pack); Opts.CurrentModule = Args.getLastArgValue(OPT_fmodule_name); + Opts.AppExt = Args.hasArg(OPT_fapplication_extension); Opts.ImplementationOfModule = Args.getLastArgValue(OPT_fmodule_implementation_of); - Opts.NativeHalfType = Opts.NativeHalfType; + Opts.ModuleFeatures = Args.getAllArgValues(OPT_fmodule_feature); + Opts.NativeHalfType |= Args.hasArg(OPT_fnative_half_type); Opts.HalfArgsAndReturns = Args.hasArg(OPT_fallow_half_arguments_and_returns); + Opts.GNUAsm = !Args.hasArg(OPT_fno_gnu_inline_asm); if (!Opts.CurrentModule.empty() && !Opts.ImplementationOfModule.empty() && Opts.CurrentModule != Opts.ImplementationOfModule) { @@ -1570,6 +1598,12 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK, << Opts.CurrentModule << Opts.ImplementationOfModule; } + // For now, we only support local submodule visibility in C++ (because we + // heavily depend on the ODR for merging redefinitions). + if (Opts.ModulesLocalVisibility && !Opts.CPlusPlus) + Diags.Report(diag::err_drv_argument_not_allowed_with) + << "-fmodules-local-submodule-visibility" << "C"; + if (Arg *A = Args.getLastArg(OPT_faddress_space_map_mangling_EQ)) { switch (llvm::StringSwitch<unsigned>(A->getValue()) .Case("target", LangOptions::ASMM_Target) @@ -1610,12 +1644,8 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK, Opts.setMSPointerToMemberRepresentationMethod(InheritanceModel); } - // Check if -fopenmp= is specified. - if (const Arg *A = Args.getLastArg(options::OPT_fopenmp_EQ)) { - Opts.OpenMP = llvm::StringSwitch<bool>(A->getValue()) - .Case("libiomp5", true) - .Default(false); - } + // Check if -fopenmp is specified. 
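Several options in the hunks above use the positive/negative flag pattern, e.g. Args.hasFlag(OPT_fmodules_implicit_maps, OPT_fno_modules_implicit_maps, true): the last flag of the pair wins, and the default applies when neither appears. A minimal sketch of that lookup (hypothetical helper, not the clang ArgList API):

#include <string>
#include <vector>

bool hasFlag(const std::vector<std::string> &Args, const std::string &Pos,
             const std::string &Neg, bool Default) {
  bool Value = Default;
  for (const auto &A : Args) // later flags override earlier ones
    if (A == Pos)
      Value = true;
    else if (A == Neg)
      Value = false;
  return Value;
}
// e.g. hasFlag(Args, "-fmodules-implicit-maps",
//              "-fno-modules-implicit-maps", /*Default=*/true)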
+ Opts.OpenMP = Args.hasArg(options::OPT_fopenmp); // Record whether the __DEPRECATED define was requested. Opts.Deprecated = Args.hasFlag(OPT_fdeprecated_macro, @@ -1660,7 +1690,7 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK, // -fsanitize-address-field-padding=N has to be a LangOpt, parse it here. Opts.SanitizeAddressFieldPadding = getLastArgIntValue(Args, OPT_fsanitize_address_field_padding, 0, Diags); - Opts.SanitizerBlacklistFile = Args.getLastArgValue(OPT_fsanitize_blacklist); + Opts.SanitizerBlacklistFiles = Args.getAllArgValues(OPT_fsanitize_blacklist); } static void ParsePreprocessorArgs(PreprocessorOptions &Opts, ArgList &Args, @@ -1805,6 +1835,7 @@ static void ParsePreprocessorOutputArgs(PreprocessorOutputOptions &Opts, Opts.ShowMacroComments = Args.hasArg(OPT_CC); Opts.ShowMacros = Args.hasArg(OPT_dM) || Args.hasArg(OPT_dD); Opts.RewriteIncludes = Args.hasArg(OPT_frewrite_includes); + Opts.UseLineDirectives = Args.hasArg(OPT_fuse_line_directives); } static void ParseTargetArgs(TargetOptions &Opts, ArgList &Args) { @@ -2002,7 +2033,7 @@ std::string CompilerInvocation::getModuleHash() const { llvm::sys::path::append(systemVersionFile, "SystemVersion.plist"); llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> buffer = - llvm::MemoryBuffer::getFile(systemVersionFile.str()); + llvm::MemoryBuffer::getFile(systemVersionFile); if (buffer) { code = hash_combine(code, buffer.get()->getBuffer()); diff --git a/contrib/llvm/tools/clang/lib/Frontend/DependencyFile.cpp b/contrib/llvm/tools/clang/lib/Frontend/DependencyFile.cpp index 6ea8f51..0995ab4 100644 --- a/contrib/llvm/tools/clang/lib/Frontend/DependencyFile.cpp +++ b/contrib/llvm/tools/clang/lib/Frontend/DependencyFile.cpp @@ -150,6 +150,8 @@ class DFGImpl : public PPCallbacks { bool AddMissingHeaderDeps; bool SeenMissingHeader; bool IncludeModuleFiles; + DependencyOutputFormat OutputFormat; + private: bool FileMatchesDepCriteria(const char *Filename, SrcMgr::CharacteristicKind FileType); @@ -162,7 +164,8 @@ public: PhonyTarget(Opts.UsePhonyTargets), AddMissingHeaderDeps(Opts.AddMissingHeaderDeps), SeenMissingHeader(false), - IncludeModuleFiles(Opts.IncludeModuleFiles) {} + IncludeModuleFiles(Opts.IncludeModuleFiles), + OutputFormat(Opts.OutputFormat) {} void FileChanged(SourceLocation Loc, FileChangeReason Reason, SrcMgr::CharacteristicKind FileType, @@ -289,13 +292,76 @@ void DFGImpl::AddFilename(StringRef Filename) { Files.push_back(Filename); } -/// PrintFilename - GCC escapes spaces, # and $, but apparently not ' or " or -/// other scary characters. -static void PrintFilename(raw_ostream &OS, StringRef Filename) { +/// Print the filename, with escaping or quoting that accommodates the three +/// most likely tools that use dependency files: GNU Make, BSD Make, and +/// NMake/Jom. +/// +/// BSD Make is the simplest case: It does no escaping at all. This means +/// characters that are normally delimiters, i.e. space and # (the comment +/// character) simply aren't supported in filenames. +/// +/// GNU Make does allow space and # in filenames, but to avoid being treated +/// as a delimiter or comment, these must be escaped with a backslash. Because +/// backslash is itself the escape character, if a backslash appears in a +/// filename, it should be escaped as well. (As a special case, $ is escaped +/// as $$, which is the normal Make way to handle the $ character.) 
+/// For compatibility with BSD Make and historical practice, if GNU Make +/// un-escapes characters in a filename but doesn't find a match, it will +/// retry with the unmodified original string. +/// +/// GCC tries to accommodate both Make formats by escaping any space or # +/// characters in the original filename, but not escaping backslashes. The +/// apparent intent is that filenames with backslashes will be handled +/// correctly by BSD Make, and by GNU Make in its fallback mode of using the +/// unmodified original string; filenames with # or space characters aren't +/// supported by BSD Make at all, but will be handled correctly by GNU Make +/// due to the escaping. +/// +/// A corner case that GCC gets only partly right is when the original filename +/// has a backslash immediately followed by space or #. GNU Make would expect +/// this backslash to be escaped; however, GCC escapes the original backslash +/// only when followed by space, not #. It will therefore take a dependency +/// from a directive such as +/// #include "a\ b\#c.h" +/// and emit it as +/// a\\\ b\\#c.h +/// which GNU Make will interpret as +/// a\ b\ +/// followed by a comment. Failing to find this file, it will fall back to the +/// original string, which probably doesn't exist either; in any case it won't +/// find +/// a\ b\#c.h +/// which is the actual filename specified by the include directive. +/// +/// Clang does what GCC does, rather than what GNU Make expects. +/// +/// NMake/Jom has a different set of scary characters, but wraps filespecs in +/// double-quotes to avoid misinterpreting them; see +/// https://msdn.microsoft.com/en-us/library/dd9y37ha.aspx for NMake info, +/// https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx +/// for Windows file-naming info. +static void PrintFilename(raw_ostream &OS, StringRef Filename, + DependencyOutputFormat OutputFormat) { + if (OutputFormat == DependencyOutputFormat::NMake) { + // Add quotes if needed. These are the characters listed as "special" to + // NMake, that are legal in a Windows filespec, and that could cause + // misinterpretation of the dependency string. + if (Filename.find_first_of(" #${}^!") != StringRef::npos) + OS << '\"' << Filename << '\"'; + else + OS << Filename; + return; + } + assert(OutputFormat == DependencyOutputFormat::Make); for (unsigned i = 0, e = Filename.size(); i != e; ++i) { - if (Filename[i] == ' ' || Filename[i] == '#') + if (Filename[i] == '#') // Handle '#' the broken gcc way. + OS << '\\'; + else if (Filename[i] == ' ') { // Handle space correctly. OS << '\\'; - else if (Filename[i] == '$') // $ is escaped by $$. + unsigned j = i; + while (j > 0 && Filename[--j] == '\\') + OS << '\\'; + } else if (Filename[i] == '$') // $ is escaped by $$.
OS << '$'; OS << Filename[i]; } @@ -354,7 +420,7 @@ void DFGImpl::OutputDependencyFile() { Columns = 2; } OS << ' '; - PrintFilename(OS, *I); + PrintFilename(OS, *I, OutputFormat); Columns += N + 1; } OS << '\n'; @@ -365,7 +431,7 @@ void DFGImpl::OutputDependencyFile() { for (std::vector<std::string>::iterator I = Files.begin() + 1, E = Files.end(); I != E; ++I) { OS << '\n'; - PrintFilename(OS, *I); + PrintFilename(OS, *I, OutputFormat); OS << ":\n"; } } diff --git a/contrib/llvm/tools/clang/lib/Frontend/FrontendAction.cpp b/contrib/llvm/tools/clang/lib/Frontend/FrontendAction.cpp index c81c81a..9bba755 100644 --- a/contrib/llvm/tools/clang/lib/Frontend/FrontendAction.cpp +++ b/contrib/llvm/tools/clang/lib/Frontend/FrontendAction.cpp @@ -44,7 +44,7 @@ public: explicit DelegatingDeserializationListener( ASTDeserializationListener *Previous, bool DeletePrevious) : Previous(Previous), DeletePrevious(DeletePrevious) {} - virtual ~DelegatingDeserializationListener() { + ~DelegatingDeserializationListener() override { if (DeletePrevious) delete Previous; } @@ -71,7 +71,7 @@ public: Previous->SelectorRead(ID, Sel); } void MacroDefinitionRead(serialization::PreprocessedEntityID PPID, - MacroDefinition *MD) override { + MacroDefinitionRecord *MD) override { if (Previous) Previous->MacroDefinitionRead(PPID, MD); } @@ -262,18 +262,20 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI, FileManager &FileMgr = CI.getFileManager(); PreprocessorOptions &PPOpts = CI.getPreprocessorOpts(); StringRef PCHInclude = PPOpts.ImplicitPCHInclude; + std::string SpecificModuleCachePath = CI.getSpecificModuleCachePath(); if (const DirectoryEntry *PCHDir = FileMgr.getDirectory(PCHInclude)) { std::error_code EC; SmallString<128> DirNative; llvm::sys::path::native(PCHDir->getName(), DirNative); bool Found = false; - for (llvm::sys::fs::directory_iterator Dir(DirNative.str(), EC), DirEnd; + for (llvm::sys::fs::directory_iterator Dir(DirNative, EC), DirEnd; Dir != DirEnd && !EC; Dir.increment(EC)) { // Check whether this is an acceptable AST file. if (ASTReader::isAcceptableASTFile(Dir->path(), FileMgr, CI.getLangOpts(), CI.getTargetOpts(), - CI.getPreprocessorOpts())) { + CI.getPreprocessorOpts(), + SpecificModuleCachePath)) { PPOpts.ImplicitPCHInclude = Dir->path(); Found = true; break; @@ -383,6 +385,15 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI, "doesn't support modules"); } + // If we were asked to load any module map files, do so now. + for (const auto &Filename : CI.getFrontendOpts().ModuleMapFiles) { + if (auto *File = CI.getFileManager().getFile(Filename)) + CI.getPreprocessor().getHeaderSearchInfo().loadModuleMapFile( + File, /*IsSystem*/false); + else + CI.getDiagnostics().Report(diag::err_module_map_not_found) << Filename; + } + // If we were asked to load any module files, do so now. for (const auto &ModuleFile : CI.getFrontendOpts().ModuleFiles) if (!CI.loadModuleFile(ModuleFile)) @@ -457,16 +468,12 @@ void FrontendAction::EndSourceFile() { // FIXME: There is more per-file stuff we could just drop here? 
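The escaping rules documented above for PrintFilename can be exercised in isolation. The following is an illustration of the GCC-compatible behavior described in that comment, not the clang function itself: '#' and space are escaped with a backslash, any backslashes immediately preceding a space are escaped again, and '$' is doubled.

#include <iostream>
#include <string>

std::string escapeForMake(const std::string &Name) {
  std::string Out;
  for (size_t I = 0; I != Name.size(); ++I) {
    const char C = Name[I];
    if (C == '#') {
      Out += '\\'; // '#' escaped the (broken) gcc way
    } else if (C == ' ') {
      Out += '\\';
      size_t J = I; // re-escape backslashes that precede the space
      while (J > 0 && Name[--J] == '\\')
        Out += '\\';
    } else if (C == '$') {
      Out += '$'; // $ is written as $$
    }
    Out += C;
  }
  return Out;
}

int main() {
  std::cout << escapeForMake("a b#c$d\\ e") << "\n";
  // prints: a\ b\#c$$d\\\ e
}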
bool DisableFree = CI.getFrontendOpts().DisableFree; if (DisableFree) { - if (!isCurrentFileAST()) { - CI.resetAndLeakSema(); - CI.resetAndLeakASTContext(); - } + CI.resetAndLeakSema(); + CI.resetAndLeakASTContext(); BuryPointer(CI.takeASTConsumer().get()); } else { - if (!isCurrentFileAST()) { - CI.setSema(nullptr); - CI.setASTContext(nullptr); - } + CI.setSema(nullptr); + CI.setASTContext(nullptr); CI.setASTConsumer(nullptr); } @@ -483,13 +490,16 @@ void FrontendAction::EndSourceFile() { // FrontendAction. CI.clearOutputFiles(/*EraseFiles=*/shouldEraseOutputFiles()); - // FIXME: Only do this if DisableFree is set. if (isCurrentFileAST()) { - CI.resetAndLeakSema(); - CI.resetAndLeakASTContext(); - CI.resetAndLeakPreprocessor(); - CI.resetAndLeakSourceManager(); - CI.resetAndLeakFileManager(); + if (DisableFree) { + CI.resetAndLeakPreprocessor(); + CI.resetAndLeakSourceManager(); + CI.resetAndLeakFileManager(); + } else { + CI.setPreprocessor(nullptr); + CI.setSourceManager(nullptr); + CI.setFileManager(nullptr); + } } setCompilerInstance(nullptr); diff --git a/contrib/llvm/tools/clang/lib/Frontend/FrontendActions.cpp b/contrib/llvm/tools/clang/lib/Frontend/FrontendActions.cpp index 701ef02..46cdeeb 100644 --- a/contrib/llvm/tools/clang/lib/Frontend/FrontendActions.cpp +++ b/contrib/llvm/tools/clang/lib/Frontend/FrontendActions.cpp @@ -79,8 +79,9 @@ std::unique_ptr<ASTConsumer> GeneratePCHAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) { std::string Sysroot; std::string OutputFile; - raw_ostream *OS = nullptr; - if (ComputeASTConsumerArguments(CI, InFile, Sysroot, OutputFile, OS)) + raw_ostream *OS = + ComputeASTConsumerArguments(CI, InFile, Sysroot, OutputFile); + if (!OS) return nullptr; if (!CI.getFrontendOpts().RelocatablePCH) @@ -89,28 +90,27 @@ GeneratePCHAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) { nullptr, Sysroot, OS); } -bool GeneratePCHAction::ComputeASTConsumerArguments(CompilerInstance &CI, - StringRef InFile, - std::string &Sysroot, - std::string &OutputFile, - raw_ostream *&OS) { +raw_ostream *GeneratePCHAction::ComputeASTConsumerArguments( + CompilerInstance &CI, StringRef InFile, std::string &Sysroot, + std::string &OutputFile) { Sysroot = CI.getHeaderSearchOpts().Sysroot; if (CI.getFrontendOpts().RelocatablePCH && Sysroot.empty()) { CI.getDiagnostics().Report(diag::err_relocatable_without_isysroot); - return true; + return nullptr; } // We use createOutputFile here because this is exposed via libclang, and we // must disable the RemoveFileOnSignal behavior. // We use a temporary to avoid race conditions. 
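The DisableFree path above intentionally leaks the Sema and ASTContext at end-of-file (skipping destructors saves shutdown time on large ASTs), and BuryPointer parks the consumer pointer somewhere permanently reachable so leak checkers do not flag it. A sketch of that idea under assumed semantics, not the llvm::BuryPointer implementation:

#include <memory>
#include <vector>

// A static "graveyard" keeps buried pointers reachable forever, so tools
// like LeakSanitizer report them as still referenced rather than leaked.
static std::vector<void *> Graveyard;

template <typename T> void buryPointer(std::unique_ptr<T> Ptr) {
  Graveyard.push_back(Ptr.release()); // ownership dropped on purpose
}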
- OS = CI.createOutputFile(CI.getFrontendOpts().OutputFile, /*Binary=*/true, - /*RemoveFileOnSignal=*/false, InFile, - /*Extension=*/"", /*useTemporary=*/true); + raw_ostream *OS = + CI.createOutputFile(CI.getFrontendOpts().OutputFile, /*Binary=*/true, + /*RemoveFileOnSignal=*/false, InFile, + /*Extension=*/"", /*useTemporary=*/true); if (!OS) - return true; + return nullptr; OutputFile = CI.getFrontendOpts().OutputFile; - return false; + return OS; } std::unique_ptr<ASTConsumer> @@ -118,8 +118,9 @@ GenerateModuleAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) { std::string Sysroot; std::string OutputFile; - raw_ostream *OS = nullptr; - if (ComputeASTConsumerArguments(CI, InFile, Sysroot, OutputFile, OS)) + raw_ostream *OS = + ComputeASTConsumerArguments(CI, InFile, Sysroot, OutputFile); + if (!OS) return nullptr; return llvm::make_unique<PCHGenerator>(CI.getPreprocessor(), OutputFile, @@ -151,22 +152,6 @@ static std::error_code addHeaderInclude(StringRef HeaderName, return std::error_code(); } -static std::error_code addHeaderInclude(const FileEntry *Header, - SmallVectorImpl<char> &Includes, - const LangOptions &LangOpts, - bool IsExternC) { - // Use an absolute path if we don't have a filename as written in the module - // map file; this ensures that we will identify the right file independent of - // header search paths. - if (llvm::sys::path::is_absolute(Header->getName())) - return addHeaderInclude(Header->getName(), Includes, LangOpts, IsExternC); - - SmallString<256> AbsName(Header->getName()); - if (std::error_code Err = llvm::sys::fs::make_absolute(AbsName)) - return Err; - return addHeaderInclude(AbsName, Includes, LangOpts, IsExternC); -} - /// \brief Collect the set of header includes needed to construct the given /// module and update the TopHeaders file set of the module. /// @@ -195,21 +180,21 @@ collectModuleHeaderIncludes(const LangOptions &LangOpts, FileManager &FileMgr, } // Note that Module->PrivateHeaders will not be a TopHeader. - if (const FileEntry *UmbrellaHeader = Module->getUmbrellaHeader()) { - // FIXME: Track the name as written here. - Module->addTopHeader(UmbrellaHeader); + if (Module::Header UmbrellaHeader = Module->getUmbrellaHeader()) { + Module->addTopHeader(UmbrellaHeader.Entry); if (Module->Parent) { // Include the umbrella header for submodules. - if (std::error_code Err = addHeaderInclude(UmbrellaHeader, Includes, - LangOpts, Module->IsExternC)) + if (std::error_code Err = addHeaderInclude(UmbrellaHeader.NameAsWritten, + Includes, LangOpts, + Module->IsExternC)) return Err; } - } else if (const DirectoryEntry *UmbrellaDir = Module->getUmbrellaDir()) { + } else if (Module::DirectoryName UmbrellaDir = Module->getUmbrellaDir()) { // Add all of the headers we find in this subdirectory. std::error_code EC; SmallString<128> DirNative; - llvm::sys::path::native(UmbrellaDir->getName(), DirNative); - for (llvm::sys::fs::recursive_directory_iterator Dir(DirNative.str(), EC), + llvm::sys::path::native(UmbrellaDir.Entry->getName(), DirNative); + for (llvm::sys::fs::recursive_directory_iterator Dir(DirNative, EC), DirEnd; Dir != DirEnd && !EC; Dir.increment(EC)) { // Check whether this entry has an extension typically associated with @@ -230,11 +215,20 @@ collectModuleHeaderIncludes(const LangOptions &LangOpts, FileManager &FileMgr, if (ModMap.isHeaderUnavailableInModule(Header, Module)) continue; + // Compute the relative path from the directory to this file. 
+ SmallVector<StringRef, 16> Components; + auto PathIt = llvm::sys::path::rbegin(Dir->path()); + for (int I = 0; I != Dir.level() + 1; ++I, ++PathIt) + Components.push_back(*PathIt); + SmallString<128> RelativeHeader(UmbrellaDir.NameAsWritten); + for (auto It = Components.rbegin(), End = Components.rend(); It != End; + ++It) + llvm::sys::path::append(RelativeHeader, *It); + // Include this header as part of the umbrella directory. - // FIXME: Track the name as written through to here. Module->addTopHeader(Header); - if (std::error_code Err = - addHeaderInclude(Header, Includes, LangOpts, Module->IsExternC)) + if (std::error_code Err = addHeaderInclude(RelativeHeader, Includes, + LangOpts, Module->IsExternC)) return Err; } @@ -326,10 +320,9 @@ bool GenerateModuleAction::BeginSourceFileAction(CompilerInstance &CI, // Collect the set of #includes we need to build the module. SmallString<256> HeaderContents; std::error_code Err = std::error_code(); - if (const FileEntry *UmbrellaHeader = Module->getUmbrellaHeader()) - // FIXME: Track the file name as written. - Err = addHeaderInclude(UmbrellaHeader, HeaderContents, CI.getLangOpts(), - Module->IsExternC); + if (Module::Header UmbrellaHeader = Module->getUmbrellaHeader()) + Err = addHeaderInclude(UmbrellaHeader.NameAsWritten, HeaderContents, + CI.getLangOpts(), Module->IsExternC); if (!Err) Err = collectModuleHeaderIncludes( CI.getLangOpts(), FileMgr, @@ -355,11 +348,9 @@ bool GenerateModuleAction::BeginSourceFileAction(CompilerInstance &CI, return true; } -bool GenerateModuleAction::ComputeASTConsumerArguments(CompilerInstance &CI, - StringRef InFile, - std::string &Sysroot, - std::string &OutputFile, - raw_ostream *&OS) { +raw_ostream *GenerateModuleAction::ComputeASTConsumerArguments( + CompilerInstance &CI, StringRef InFile, std::string &Sysroot, + std::string &OutputFile) { // If no output file was provided, figure out where this module would go // in the module cache. if (CI.getFrontendOpts().OutputFile.empty()) { @@ -368,19 +359,20 @@ bool GenerateModuleAction::ComputeASTConsumerArguments(CompilerInstance &CI, HS.getModuleFileName(CI.getLangOpts().CurrentModule, ModuleMapForUniquing->getName()); } - + // We use createOutputFile here because this is exposed via libclang, and we // must disable the RemoveFileOnSignal behavior. // We use a temporary to avoid race conditions. 
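The relative-path computation added above walks Dir.level() + 1 components back from the found header and re-appends them to the umbrella directory's name as written. The same arithmetic in standalone form (std::filesystem and made-up paths, not the llvm::sys::path API):

#include <filesystem>
#include <iostream>
#include <vector>

namespace fs = std::filesystem;

fs::path relativeHeader(const fs::path &Found, int Level,
                        const fs::path &UmbrellaAsWritten) {
  std::vector<fs::path> Components(Found.begin(), Found.end());
  fs::path Rel = UmbrellaAsWritten;
  // Keep the last Level + 1 components, mirroring the rbegin() walk above.
  for (size_t I = Components.size() - (Level + 1); I < Components.size(); ++I)
    Rel /= Components[I];
  return Rel;
}

int main() {
  // A header found one directory below the umbrella dir (Level == 1).
  std::cout << relativeHeader("/cache/inc/sub/hdr.h", 1, "inc") << "\n";
  // prints "inc/sub/hdr.h"
}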
- OS = CI.createOutputFile(CI.getFrontendOpts().OutputFile, /*Binary=*/true, - /*RemoveFileOnSignal=*/false, InFile, - /*Extension=*/"", /*useTemporary=*/true, - /*CreateMissingDirectories=*/true); + raw_ostream *OS = + CI.createOutputFile(CI.getFrontendOpts().OutputFile, /*Binary=*/true, + /*RemoveFileOnSignal=*/false, InFile, + /*Extension=*/"", /*useTemporary=*/true, + /*CreateMissingDirectories=*/true); if (!OS) - return true; - + return nullptr; + OutputFile = CI.getFrontendOpts().OutputFile; - return false; + return OS; } std::unique_ptr<ASTConsumer> @@ -462,8 +454,8 @@ namespace { return false; } - bool ReadTargetOptions(const TargetOptions &TargetOpts, - bool Complain) override { + bool ReadTargetOptions(const TargetOptions &TargetOpts, bool Complain, + bool AllowCompatibleDifferences) override { Out.indent(2) << "Target options:\n"; Out.indent(4) << " Triple: " << TargetOpts.Triple << "\n"; Out.indent(4) << " CPU: " << TargetOpts.CPU << "\n"; @@ -480,9 +472,8 @@ namespace { return false; } - virtual bool - ReadDiagnosticOptions(IntrusiveRefCntPtr<DiagnosticOptions> DiagOpts, - bool Complain) override { + bool ReadDiagnosticOptions(IntrusiveRefCntPtr<DiagnosticOptions> DiagOpts, + bool Complain) override { Out.indent(2) << "Diagnostic options:\n"; #define DIAGOPT(Name, Bits, Default) DUMP_BOOLEAN(DiagOpts->Name, #Name); #define ENUM_DIAGOPT(Name, Type, Bits, Default) \ @@ -501,9 +492,11 @@ namespace { } bool ReadHeaderSearchOptions(const HeaderSearchOptions &HSOpts, + StringRef SpecificModuleCachePath, bool Complain) override { Out.indent(2) << "Header search options:\n"; Out.indent(4) << "System root [-isysroot=]: '" << HSOpts.Sysroot << "'\n"; + Out.indent(4) << "Module Cache: '" << SpecificModuleCachePath << "'\n"; DUMP_BOOLEAN(HSOpts.UseBuiltinIncludes, "Use builtin include directories [-nobuiltininc]"); DUMP_BOOLEAN(HSOpts.UseStandardSystemIncludes, @@ -598,15 +591,9 @@ void DumpTokensAction::ExecuteAction() { void GeneratePTHAction::ExecuteAction() { CompilerInstance &CI = getCompilerInstance(); - if (CI.getFrontendOpts().OutputFile.empty() || - CI.getFrontendOpts().OutputFile == "-") { - // FIXME: Don't fail this way. - // FIXME: Verify that we can actually seek in the given file. 
- llvm::report_fatal_error("PTH requires a seekable file for output!"); - } - llvm::raw_fd_ostream *OS = - CI.createDefaultOutputFile(true, getCurrentFile()); - if (!OS) return; + raw_pwrite_stream *OS = CI.createDefaultOutputFile(true, getCurrentFile()); + if (!OS) + return; CacheTokens(CI.getPreprocessor(), OS); } @@ -688,6 +675,7 @@ void PrintPreambleAction::ExecuteAction() { case IK_None: case IK_Asm: case IK_PreprocessedC: + case IK_PreprocessedCuda: case IK_PreprocessedCXX: case IK_PreprocessedObjC: case IK_PreprocessedObjCXX: diff --git a/contrib/llvm/tools/clang/lib/Frontend/FrontendOptions.cpp b/contrib/llvm/tools/clang/lib/Frontend/FrontendOptions.cpp index 1869d0c..9ede674 100644 --- a/contrib/llvm/tools/clang/lib/Frontend/FrontendOptions.cpp +++ b/contrib/llvm/tools/clang/lib/Frontend/FrontendOptions.cpp @@ -18,6 +18,7 @@ InputKind FrontendOptions::getInputKindForExtension(StringRef Extension) { .Cases("S", "s", IK_Asm) .Case("i", IK_PreprocessedC) .Case("ii", IK_PreprocessedCXX) + .Case("cui", IK_PreprocessedCuda) .Case("m", IK_ObjC) .Case("mi", IK_PreprocessedObjC) .Cases("mm", "M", IK_ObjCXX) diff --git a/contrib/llvm/tools/clang/lib/Frontend/HeaderIncludeGen.cpp b/contrib/llvm/tools/clang/lib/Frontend/HeaderIncludeGen.cpp index 2701194..5732e5b 100644 --- a/contrib/llvm/tools/clang/lib/Frontend/HeaderIncludeGen.cpp +++ b/contrib/llvm/tools/clang/lib/Frontend/HeaderIncludeGen.cpp @@ -35,7 +35,7 @@ public: OwnsOutputFile(OwnsOutputFile_), ShowAllHeaders(ShowAllHeaders_), ShowDepth(ShowDepth_), MSStyle(MSStyle_) {} - ~HeaderIncludesCallback() { + ~HeaderIncludesCallback() override { if (OwnsOutputFile) delete OutputFile; } diff --git a/contrib/llvm/tools/clang/lib/Frontend/InitHeaderSearch.cpp b/contrib/llvm/tools/clang/lib/Frontend/InitHeaderSearch.cpp index a518a0a..2bd999e 100644 --- a/contrib/llvm/tools/clang/lib/Frontend/InitHeaderSearch.cpp +++ b/contrib/llvm/tools/clang/lib/Frontend/InitHeaderSearch.cpp @@ -227,10 +227,12 @@ void InitHeaderSearch::AddDefaultCIncludePaths(const llvm::Triple &triple, if (HSOpts.UseStandardSystemIncludes) { switch (os) { + case llvm::Triple::CloudABI: case llvm::Triple::FreeBSD: case llvm::Triple::NetBSD: case llvm::Triple::OpenBSD: case llvm::Triple::Bitrig: + case llvm::Triple::NaCl: break; default: // FIXME: temporary hack: hard-coded paths. @@ -246,7 +248,7 @@ void InitHeaderSearch::AddDefaultCIncludePaths(const llvm::Triple &triple, // supplied path. 
SmallString<128> P = StringRef(HSOpts.ResourceDir); llvm::sys::path::append(P, "include"); - AddUnmappedPath(P.str(), ExternCSystem, false); + AddUnmappedPath(P, ExternCSystem, false); } // All remaining additions are for system include directories, early exit if @@ -270,6 +272,14 @@ void InitHeaderSearch::AddDefaultCIncludePaths(const llvm::Triple &triple, case llvm::Triple::Linux: llvm_unreachable("Include management is handled in the driver."); + case llvm::Triple::CloudABI: { + // <sysroot>/<triple>/include + SmallString<128> P = StringRef(HSOpts.ResourceDir); + llvm::sys::path::append(P, "../../..", triple.str(), "include"); + AddPath(P, System, false); + break; + } + case llvm::Triple::Haiku: AddPath("/boot/common/include", System, false); AddPath("/boot/develop/headers/os", System, false); @@ -317,18 +327,18 @@ void InitHeaderSearch::AddDefaultCIncludePaths(const llvm::Triple &triple, // <sysroot>/i686-w64-mingw32/include SmallString<128> P = StringRef(HSOpts.ResourceDir); llvm::sys::path::append(P, "../../../i686-w64-mingw32/include"); - AddPath(P.str(), System, false); + AddPath(P, System, false); // <sysroot>/x86_64-w64-mingw32/include P.resize(HSOpts.ResourceDir.size()); llvm::sys::path::append(P, "../../../x86_64-w64-mingw32/include"); - AddPath(P.str(), System, false); + AddPath(P, System, false); // mingw.org crt include paths // <sysroot>/include P.resize(HSOpts.ResourceDir.size()); llvm::sys::path::append(P, "../../../include"); - AddPath(P.str(), System, false); + AddPath(P, System, false); AddPath("/mingw/include", System, false); #if defined(LLVM_ON_WIN32) AddPath("c:/mingw/include", System, false); @@ -340,8 +350,15 @@ void InitHeaderSearch::AddDefaultCIncludePaths(const llvm::Triple &triple, break; } - if ( os != llvm::Triple::RTEMS ) + switch (os) { + case llvm::Triple::CloudABI: + case llvm::Triple::RTEMS: + case llvm::Triple::NaCl: + break; + default: AddPath("/usr/include", ExternCSystem, false); + break; + } } void InitHeaderSearch:: @@ -488,7 +505,7 @@ void InitHeaderSearch::AddDefaultIncludePaths(const LangOptions &Lang, // Get foo/include/c++/v1 llvm::sys::path::append(P, "include", "c++", "v1"); - AddUnmappedPath(P.str(), CXXSystem, false); + AddUnmappedPath(P, CXXSystem, false); } } // On Solaris, include the support directory for things like xlocale and @@ -699,7 +716,7 @@ void clang::ApplyHeaderSearchOptions(HeaderSearch &HS, // Set up the builtin include directory in the module map. 
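The CloudABI case above derives <sysroot>/<triple>/include from ResourceDir, which conventionally lives at <sysroot>/lib/clang/<version>, so stepping up three path components returns to the sysroot. A worked example with made-up paths:

#include <filesystem>
#include <iostream>

int main() {
  std::filesystem::path P = "/usr/lib/clang/3.7.0"; // stand-in for ResourceDir
  P /= "../../..";
  P /= "x86_64-unknown-cloudabi";
  P /= "include";
  std::cout << P.lexically_normal() << "\n";
  // prints "/usr/x86_64-unknown-cloudabi/include"
}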
SmallString<128> P = StringRef(HSOpts.ResourceDir); llvm::sys::path::append(P, "include"); - if (const DirectoryEntry *Dir = HS.getFileMgr().getDirectory(P.str())) + if (const DirectoryEntry *Dir = HS.getFileMgr().getDirectory(P)) HS.getModuleMap().setBuiltinIncludeDir(Dir); } diff --git a/contrib/llvm/tools/clang/lib/Frontend/InitPreprocessor.cpp b/contrib/llvm/tools/clang/lib/Frontend/InitPreprocessor.cpp index 3550ac2..dfc46f4 100644 --- a/contrib/llvm/tools/clang/lib/Frontend/InitPreprocessor.cpp +++ b/contrib/llvm/tools/clang/lib/Frontend/InitPreprocessor.cpp @@ -133,6 +133,7 @@ static void DefineFloatMacros(MacroBuilder &Builder, StringRef Prefix, "4.94065645841246544176568792868221e-324", "6.47517511943802511092443895822764655e-4966"); int Digits = PickFP(Sem, 6, 15, 18, 31, 33); + int DecimalDigits = PickFP(Sem, 9, 17, 21, 33, 36); Epsilon = PickFP(Sem, "1.19209290e-7", "2.2204460492503131e-16", "1.08420217248550443401e-19", "4.94065645841246544176568792868221e-324", @@ -159,6 +160,7 @@ static void DefineFloatMacros(MacroBuilder &Builder, StringRef Prefix, Builder.defineMacro(DefPrefix + "DENORM_MIN__", Twine(DenormMin)+Ext); Builder.defineMacro(DefPrefix + "HAS_DENORM__"); Builder.defineMacro(DefPrefix + "DIG__", Twine(Digits)); + Builder.defineMacro(DefPrefix + "DECIMAL_DIG__", Twine(DecimalDigits)); Builder.defineMacro(DefPrefix + "EPSILON__", Twine(Epsilon)+Ext); Builder.defineMacro(DefPrefix + "HAS_INFINITY__"); Builder.defineMacro(DefPrefix + "HAS_QUIET_NAN__"); @@ -451,6 +453,8 @@ static void InitializeCPlusPlusFeatureTestMacros(const LangOptions &LangOpts, } if (LangOpts.SizedDeallocation) Builder.defineMacro("__cpp_sized_deallocation", "201309"); + if (LangOpts.ConceptsTS) + Builder.defineMacro("__cpp_experimental_concepts", "1"); } static void InitializePredefinedMacros(const TargetInfo &TI, @@ -705,6 +709,10 @@ static void InitializePredefinedMacros(const TargetInfo &TI, Builder.defineMacro("__POINTER_WIDTH__", Twine((int)TI.getPointerWidth(0))); + // Define __BIGGEST_ALIGNMENT__ to be compatible with gcc. 
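The DECIMAL_DIG values threaded through PickFP above follow the C definition ceil(1 + p*log10(2)) for a binary format with p significand bits. A quick standalone check (precisions assumed for IEEE single/double, x87 extended, IBM double-double, and IEEE quad):

#include <cmath>
#include <cstdio>

int decimalDig(int P) { return (int)std::ceil(1 + P * std::log10(2.0)); }

int main() {
  const int Prec[] = {24, 53, 64, 106, 113}; // significand bits
  for (int P : Prec)
    std::printf("p = %3d -> DECIMAL_DIG = %d\n", P, decimalDig(P));
  // prints 9, 17, 21, 33, 36, matching PickFP(Sem, 9, 17, 21, 33, 36)
}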
+ Builder.defineMacro("__BIGGEST_ALIGNMENT__", + Twine(TI.getSuitableAlign() / TI.getCharWidth()) ); + if (!LangOpts.CharIsSigned) Builder.defineMacro("__CHAR_UNSIGNED__"); @@ -784,7 +792,7 @@ static void InitializePredefinedMacros(const TargetInfo &TI, Builder.defineMacro("__FINITE_MATH_ONLY__", "0"); if (!LangOpts.MSVCCompat) { - if (LangOpts.GNUInline) + if (LangOpts.GNUInline || LangOpts.CPlusPlus) Builder.defineMacro("__GNUC_GNU_INLINE__"); else Builder.defineMacro("__GNUC_STDC_INLINE__"); @@ -831,8 +839,7 @@ static void InitializePredefinedMacros(const TargetInfo &TI, // Macros to control C99 numerics and <float.h> Builder.defineMacro("__FLT_EVAL_METHOD__", Twine(TI.getFloatEvalMethod())); Builder.defineMacro("__FLT_RADIX__", "2"); - int Dig = PickFP(&TI.getLongDoubleFormat(), -1/*FIXME*/, 17, 21, 33, 36); - Builder.defineMacro("__DECIMAL_DIG__", Twine(Dig)); + Builder.defineMacro("__DECIMAL_DIG__", "__LDBL_DECIMAL_DIG__"); if (LangOpts.getStackProtector() == LangOptions::SSPOn) Builder.defineMacro("__SSP__"); diff --git a/contrib/llvm/tools/clang/lib/Frontend/ModuleDependencyCollector.cpp b/contrib/llvm/tools/clang/lib/Frontend/ModuleDependencyCollector.cpp index 62865e9..67852dc 100644 --- a/contrib/llvm/tools/clang/lib/Frontend/ModuleDependencyCollector.cpp +++ b/contrib/llvm/tools/clang/lib/Frontend/ModuleDependencyCollector.cpp @@ -77,10 +77,10 @@ std::error_code ModuleDependencyListener::copyToRoot(StringRef Src) { if (std::error_code EC = fs::create_directories(path::parent_path(Dest), /*IgnoreExisting=*/true)) return EC; - if (std::error_code EC = fs::copy_file(AbsoluteSrc.str(), Dest.str())) + if (std::error_code EC = fs::copy_file(AbsoluteSrc, Dest)) return EC; // Use the absolute path under the root for the file mapping. - Collector.addFileMapping(AbsoluteSrc.str(), Dest.str()); + Collector.addFileMapping(AbsoluteSrc, Dest); return std::error_code(); } diff --git a/contrib/llvm/tools/clang/lib/Frontend/MultiplexConsumer.cpp b/contrib/llvm/tools/clang/lib/Frontend/MultiplexConsumer.cpp index 0198828..219e949 100644 --- a/contrib/llvm/tools/clang/lib/Frontend/MultiplexConsumer.cpp +++ b/contrib/llvm/tools/clang/lib/Frontend/MultiplexConsumer.cpp @@ -37,9 +37,10 @@ public: void DeclRead(serialization::DeclID ID, const Decl *D) override; void SelectorRead(serialization::SelectorID iD, Selector Sel) override; void MacroDefinitionRead(serialization::PreprocessedEntityID, - MacroDefinition *MD) override; + MacroDefinitionRecord *MD) override; + private: - std::vector<ASTDeserializationListener*> Listeners; + std::vector<ASTDeserializationListener *> Listeners; }; MultiplexASTDeserializationListener::MultiplexASTDeserializationListener( @@ -78,7 +79,7 @@ void MultiplexASTDeserializationListener::SelectorRead( } void MultiplexASTDeserializationListener::MacroDefinitionRead( - serialization::PreprocessedEntityID ID, MacroDefinition *MD) { + serialization::PreprocessedEntityID ID, MacroDefinitionRecord *MD) { for (size_t i = 0, e = Listeners.size(); i != e; ++i) Listeners[i]->MacroDefinitionRead(ID, MD); } @@ -99,6 +100,8 @@ public: void AddedCXXTemplateSpecialization(const FunctionTemplateDecl *TD, const FunctionDecl *D) override; void DeducedReturnType(const FunctionDecl *FD, QualType ReturnType) override; + void ResolvedOperatorDelete(const CXXDestructorDecl *DD, + const FunctionDecl *Delete) override; void CompletedImplicitDefinition(const FunctionDecl *D) override; void StaticDataMemberInstantiated(const VarDecl *D) override; void AddedObjCCategoryToInterface(const 
ObjCCategoryDecl *CatD, @@ -108,6 +111,7 @@ public: const ObjCCategoryDecl *ClassExt) override; void DeclarationMarkedUsed(const Decl *D) override; void DeclarationMarkedOpenMPThreadPrivate(const Decl *D) override; + void RedefinedHiddenDefinition(const NamedDecl *D, Module *M) override; private: std::vector<ASTMutationListener*> Listeners; @@ -154,6 +158,11 @@ void MultiplexASTMutationListener::DeducedReturnType(const FunctionDecl *FD, for (size_t i = 0, e = Listeners.size(); i != e; ++i) Listeners[i]->DeducedReturnType(FD, ReturnType); } +void MultiplexASTMutationListener::ResolvedOperatorDelete( + const CXXDestructorDecl *DD, const FunctionDecl *Delete) { + for (auto *L : Listeners) + L->ResolvedOperatorDelete(DD, Delete); +} void MultiplexASTMutationListener::CompletedImplicitDefinition( const FunctionDecl *D) { for (size_t i = 0, e = Listeners.size(); i != e; ++i) @@ -186,6 +195,11 @@ void MultiplexASTMutationListener::DeclarationMarkedOpenMPThreadPrivate( for (size_t i = 0, e = Listeners.size(); i != e; ++i) Listeners[i]->DeclarationMarkedOpenMPThreadPrivate(D); } +void MultiplexASTMutationListener::RedefinedHiddenDefinition(const NamedDecl *D, + Module *M) { + for (auto *L : Listeners) + L->RedefinedHiddenDefinition(D, M); +} } // end namespace clang @@ -292,10 +306,9 @@ void MultiplexConsumer::CompleteTentativeDefinition(VarDecl *D) { Consumer->CompleteTentativeDefinition(D); } -void MultiplexConsumer::HandleVTable( - CXXRecordDecl *RD, bool DefinitionRequired) { +void MultiplexConsumer::HandleVTable(CXXRecordDecl *RD) { for (auto &Consumer : Consumers) - Consumer->HandleVTable(RD, DefinitionRequired); + Consumer->HandleVTable(RD); } ASTMutationListener *MultiplexConsumer::GetASTMutationListener() { diff --git a/contrib/llvm/tools/clang/lib/Frontend/PrintPreprocessedOutput.cpp b/contrib/llvm/tools/clang/lib/Frontend/PrintPreprocessedOutput.cpp index 7c1d9a5..037a6a5 100644 --- a/contrib/llvm/tools/clang/lib/Frontend/PrintPreprocessedOutput.cpp +++ b/contrib/llvm/tools/clang/lib/Frontend/PrintPreprocessedOutput.cpp @@ -64,12 +64,11 @@ static void PrintMacroDefinition(const IdentifierInfo &II, const MacroInfo &MI, OS << ' '; SmallString<128> SpellingBuffer; - for (MacroInfo::tokens_iterator I = MI.tokens_begin(), E = MI.tokens_end(); - I != E; ++I) { - if (I->hasLeadingSpace()) + for (const auto &T : MI.tokens()) { + if (T.hasLeadingSpace()) OS << ' '; - OS << PP.getSpelling(*I, SpellingBuffer); + OS << PP.getSpelling(T, SpellingBuffer); } } @@ -94,14 +93,14 @@ private: bool Initialized; bool DisableLineMarkers; bool DumpDefines; - bool UseLineDirective; + bool UseLineDirectives; bool IsFirstFileEntered; public: - PrintPPOutputPPCallbacks(Preprocessor &pp, raw_ostream &os, - bool lineMarkers, bool defines) - : PP(pp), SM(PP.getSourceManager()), - ConcatInfo(PP), OS(os), DisableLineMarkers(lineMarkers), - DumpDefines(defines) { + PrintPPOutputPPCallbacks(Preprocessor &pp, raw_ostream &os, bool lineMarkers, + bool defines, bool UseLineDirectives) + : PP(pp), SM(PP.getSourceManager()), ConcatInfo(PP), OS(os), + DisableLineMarkers(lineMarkers), DumpDefines(defines), + UseLineDirectives(UseLineDirectives) { CurLine = 0; CurFilename += "<uninit>"; EmittedTokensOnThisLine = false; @@ -109,9 +108,6 @@ public: FileType = SrcMgr::C_User; Initialized = false; IsFirstFileEntered = false; - - // If we're in microsoft mode, use normal #line instead of line markers. 
- UseLineDirective = PP.getLangOpts().MicrosoftExt; } void setEmittedTokensOnThisLine() { EmittedTokensOnThisLine = true; } @@ -173,7 +169,7 @@ public: /// MacroUndefined - This hook is called whenever a macro #undef is seen. void MacroUndefined(const Token &MacroNameTok, - const MacroDirective *MD) override; + const MacroDefinition &MD) override; }; } // end anonymous namespace @@ -183,7 +179,7 @@ void PrintPPOutputPPCallbacks::WriteLineInfo(unsigned LineNo, startNewLineIfNeeded(/*ShouldUpdateCurrentLine=*/false); // Emit #line directives or GNU line markers depending on what mode we're in. - if (UseLineDirective) { + if (UseLineDirectives) { OS << "#line" << ' ' << LineNo << ' ' << '"'; OS.write_escaped(CurFilename); OS << '"'; @@ -364,7 +360,7 @@ void PrintPPOutputPPCallbacks::MacroDefined(const Token &MacroNameTok, } void PrintPPOutputPPCallbacks::MacroUndefined(const Token &MacroNameTok, - const MacroDirective *MD) { + const MacroDefinition &MD) { // Only print out macro definitions in -dD mode. if (!DumpDefines) return; @@ -689,8 +685,9 @@ static void DoPrintMacros(Preprocessor &PP, raw_ostream *OS) { SmallVector<id_macro_pair, 128> MacrosByID; for (Preprocessor::macro_iterator I = PP.macro_begin(), E = PP.macro_end(); I != E; ++I) { - if (I->first->hasMacroDefinition()) - MacrosByID.push_back(id_macro_pair(I->first, I->second->getMacroInfo())); + auto *MD = I->second.getLatest(); + if (MD && MD->isDefined()) + MacrosByID.push_back(id_macro_pair(I->first, MD->getMacroInfo())); } llvm::array_pod_sort(MacrosByID.begin(), MacrosByID.end(), MacroIDCompare); @@ -719,9 +716,8 @@ void clang::DoPrintPreprocessedInput(Preprocessor &PP, raw_ostream *OS, // to -C or -CC. PP.SetCommentRetentionState(Opts.ShowComments, Opts.ShowMacroComments); - PrintPPOutputPPCallbacks *Callbacks = - new PrintPPOutputPPCallbacks(PP, *OS, !Opts.ShowLineMarkers, - Opts.ShowMacros); + PrintPPOutputPPCallbacks *Callbacks = new PrintPPOutputPPCallbacks( + PP, *OS, !Opts.ShowLineMarkers, Opts.ShowMacros, Opts.UseLineDirectives); PP.AddPragmaHandler(new UnknownPragmaHandler("#pragma", Callbacks)); PP.AddPragmaHandler("GCC", new UnknownPragmaHandler("#pragma GCC",Callbacks)); PP.AddPragmaHandler("clang", diff --git a/contrib/llvm/tools/clang/lib/Frontend/Rewrite/InclusionRewriter.cpp b/contrib/llvm/tools/clang/lib/Frontend/Rewrite/InclusionRewriter.cpp index 1400557..b9ea051 100644 --- a/contrib/llvm/tools/clang/lib/Frontend/Rewrite/InclusionRewriter.cpp +++ b/contrib/llvm/tools/clang/lib/Frontend/Rewrite/InclusionRewriter.cpp @@ -43,14 +43,15 @@ class InclusionRewriter : public PPCallbacks { StringRef MainEOL; ///< The line ending marker to use. const llvm::MemoryBuffer *PredefinesBuffer; ///< The preprocessor predefines. bool ShowLineMarkers; ///< Show #line markers. - bool UseLineDirective; ///< Use of line directives or line markers. + bool UseLineDirectives; ///< Use of line directives or line markers. typedef std::map<unsigned, FileChange> FileChangeMap; FileChangeMap FileChanges; ///< Tracks which files were included where. /// Used transitively for building up the FileChanges mapping over the /// various \c PPCallbacks callbacks. 
FileChangeMap::iterator LastInsertedFileChange; public: - InclusionRewriter(Preprocessor &PP, raw_ostream &OS, bool ShowLineMarkers); + InclusionRewriter(Preprocessor &PP, raw_ostream &OS, bool ShowLineMarkers, + bool UseLineDirectives); bool Process(FileID FileId, SrcMgr::CharacteristicKind FileType); void setPredefinesBuffer(const llvm::MemoryBuffer *Buf) { PredefinesBuffer = Buf; @@ -60,7 +61,7 @@ private: void FileChanged(SourceLocation Loc, FileChangeReason Reason, SrcMgr::CharacteristicKind FileType, FileID PrevFID) override; - void FileSkipped(const FileEntry &ParentFile, const Token &FilenameTok, + void FileSkipped(const FileEntry &SkippedFile, const Token &FilenameTok, SrcMgr::CharacteristicKind FileType) override; void InclusionDirective(SourceLocation HashLoc, const Token &IncludeTok, StringRef FileName, bool IsAngled, @@ -89,13 +90,12 @@ private: /// Initializes an InclusionRewriter with a \p PP source and \p OS destination. InclusionRewriter::InclusionRewriter(Preprocessor &PP, raw_ostream &OS, - bool ShowLineMarkers) + bool ShowLineMarkers, + bool UseLineDirectives) : PP(PP), SM(PP.getSourceManager()), OS(OS), MainEOL("\n"), PredefinesBuffer(nullptr), ShowLineMarkers(ShowLineMarkers), - LastInsertedFileChange(FileChanges.end()) { - // If we're in microsoft mode, use normal #line instead of line markers. - UseLineDirective = PP.getLangOpts().MicrosoftExt; -} + UseLineDirectives(UseLineDirectives), + LastInsertedFileChange(FileChanges.end()) {} /// Write appropriate line information as either #line directives or GNU line /// markers depending on what mode we're in, including the \p Filename and @@ -106,7 +106,7 @@ void InclusionRewriter::WriteLineInfo(const char *Filename, int Line, StringRef Extra) { if (!ShowLineMarkers) return; - if (UseLineDirective) { + if (UseLineDirectives) { OS << "#line" << ' ' << Line << ' ' << '"'; OS.write_escaped(Filename); OS << '"'; @@ -153,7 +153,7 @@ void InclusionRewriter::FileChanged(SourceLocation Loc, /// Called whenever an inclusion is skipped due to canonical header protection /// macros. 
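WriteLineInfo above emits either MS-style #line directives or GNU line markers depending on UseLineDirectives. The two formats, illustrated (output formats as commonly documented; the flag value shown is an assumption about typical usage):

#include <cstdio>

void writeLineInfo(bool UseLineDirectives, int Line, const char *File,
                   const char *Extra) {
  if (UseLineDirectives)
    std::printf("#line %d \"%s\"\n", Line, File);
  else
    std::printf("# %d \"%s\"%s\n", Line, File, Extra);
}

int main() {
  writeLineInfo(true, 42, "a.h", "");    // #line 42 "a.h"
  writeLineInfo(false, 42, "a.h", " 1"); // # 42 "a.h" 1  (flag 1: enter file)
}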
-void InclusionRewriter::FileSkipped(const FileEntry &/*ParentFile*/, +void InclusionRewriter::FileSkipped(const FileEntry &/*SkippedFile*/, const Token &/*FilenameTok*/, SrcMgr::CharacteristicKind /*FileType*/) { assert(LastInsertedFileChange != FileChanges.end() && "A file, that wasn't " @@ -561,8 +561,8 @@ bool InclusionRewriter::Process(FileID FileId, void clang::RewriteIncludesInInput(Preprocessor &PP, raw_ostream *OS, const PreprocessorOutputOptions &Opts) { SourceManager &SM = PP.getSourceManager(); - InclusionRewriter *Rewrite = new InclusionRewriter(PP, *OS, - Opts.ShowLineMarkers); + InclusionRewriter *Rewrite = new InclusionRewriter( + PP, *OS, Opts.ShowLineMarkers, Opts.UseLineDirectives); Rewrite->detectMainFileEOL(); PP.addPPCallbacks(std::unique_ptr<PPCallbacks>(Rewrite)); diff --git a/contrib/llvm/tools/clang/lib/Frontend/Rewrite/RewriteModernObjC.cpp b/contrib/llvm/tools/clang/lib/Frontend/Rewrite/RewriteModernObjC.cpp index 47f8189..e13cdb3 100644 --- a/contrib/llvm/tools/clang/lib/Frontend/Rewrite/RewriteModernObjC.cpp +++ b/contrib/llvm/tools/clang/lib/Frontend/Rewrite/RewriteModernObjC.cpp @@ -243,8 +243,8 @@ namespace { RewriteModernObjC(std::string inFile, raw_ostream *OS, DiagnosticsEngine &D, const LangOptions &LOpts, bool silenceMacroWarn, bool LineInfo); - - ~RewriteModernObjC() {} + + ~RewriteModernObjC() override {} void HandleTranslationUnit(ASTContext &C) override; @@ -889,9 +889,9 @@ RewriteModernObjC::getIvarAccessString(ObjCIvarDecl *D) { IvarT, nullptr, /*BitWidth=*/nullptr, /*Mutable=*/true, ICIS_NoInit); - MemberExpr *ME = new (Context) MemberExpr(PE, true, FD, SourceLocation(), - FD->getType(), VK_LValue, - OK_Ordinary); + MemberExpr *ME = new (Context) + MemberExpr(PE, true, SourceLocation(), FD, SourceLocation(), + FD->getType(), VK_LValue, OK_Ordinary); IvarT = Context->getDecltypeType(ME, ME->getType()); } } @@ -2689,7 +2689,7 @@ Stmt *RewriteModernObjC::RewriteObjCBoxedExpr(ObjCBoxedExpr *Exp) { MsgExprs.push_back(subExpr); SmallVector<QualType, 4> ArgTypes; - ArgTypes.push_back(Context->getObjCIdType()); + ArgTypes.push_back(Context->getObjCClassType()); ArgTypes.push_back(Context->getObjCSelType()); for (const auto PI : BoxingMethod->parameters()) ArgTypes.push_back(PI->getType()); @@ -2767,11 +2767,9 @@ Stmt *RewriteModernObjC::RewriteObjCArrayLiteralExpr(ObjCArrayLiteral *Exp) { Context->getPointerType(Context->VoidPtrTy), nullptr, /*BitWidth=*/nullptr, /*Mutable=*/true, ICIS_NoInit); - MemberExpr *ArrayLiteralME = - new (Context) MemberExpr(NSArrayCallExpr, false, ARRFD, - SourceLocation(), - ARRFD->getType(), VK_LValue, - OK_Ordinary); + MemberExpr *ArrayLiteralME = new (Context) + MemberExpr(NSArrayCallExpr, false, SourceLocation(), ARRFD, + SourceLocation(), ARRFD->getType(), VK_LValue, OK_Ordinary); QualType ConstIdT = Context->getObjCIdType().withConst(); CStyleCastExpr * ArrayLiteralObjects = NoTypeInfoCStyleCastExpr(Context, @@ -2818,7 +2816,7 @@ Stmt *RewriteModernObjC::RewriteObjCArrayLiteralExpr(ObjCArrayLiteral *Exp) { SmallVector<QualType, 4> ArgTypes; - ArgTypes.push_back(Context->getObjCIdType()); + ArgTypes.push_back(Context->getObjCClassType()); ArgTypes.push_back(Context->getObjCSelType()); for (const auto *PI : ArrayMethod->params()) ArgTypes.push_back(PI->getType()); @@ -2904,11 +2902,9 @@ Stmt *RewriteModernObjC::RewriteObjCDictionaryLiteralExpr(ObjCDictionaryLiteral Context->getPointerType(Context->VoidPtrTy), nullptr, /*BitWidth=*/nullptr, /*Mutable=*/true, ICIS_NoInit); - MemberExpr *DictLiteralValueME = - new (Context) 
MemberExpr(NSValueCallExpr, false, ARRFD, - SourceLocation(), - ARRFD->getType(), VK_LValue, - OK_Ordinary); + MemberExpr *DictLiteralValueME = new (Context) + MemberExpr(NSValueCallExpr, false, SourceLocation(), ARRFD, + SourceLocation(), ARRFD->getType(), VK_LValue, OK_Ordinary); QualType ConstIdT = Context->getObjCIdType().withConst(); CStyleCastExpr * DictValueObjects = NoTypeInfoCStyleCastExpr(Context, @@ -2919,13 +2915,11 @@ Stmt *RewriteModernObjC::RewriteObjCDictionaryLiteralExpr(ObjCDictionaryLiteral Expr *NSKeyCallExpr = new (Context) CallExpr(*Context, NSDictDRE, KeyExprs, NSDictFType, VK_LValue, SourceLocation()); - - MemberExpr *DictLiteralKeyME = - new (Context) MemberExpr(NSKeyCallExpr, false, ARRFD, - SourceLocation(), - ARRFD->getType(), VK_LValue, - OK_Ordinary); - + + MemberExpr *DictLiteralKeyME = new (Context) + MemberExpr(NSKeyCallExpr, false, SourceLocation(), ARRFD, + SourceLocation(), ARRFD->getType(), VK_LValue, OK_Ordinary); + CStyleCastExpr * DictKeyObjects = NoTypeInfoCStyleCastExpr(Context, Context->getPointerType(ConstIdT), @@ -2975,7 +2969,7 @@ Stmt *RewriteModernObjC::RewriteObjCDictionaryLiteralExpr(ObjCDictionaryLiteral SmallVector<QualType, 8> ArgTypes; - ArgTypes.push_back(Context->getObjCIdType()); + ArgTypes.push_back(Context->getObjCClassType()); ArgTypes.push_back(Context->getObjCSelType()); for (const auto *PI : DictMethod->params()) { QualType T = PI->getType(); @@ -3234,9 +3228,9 @@ Expr *RewriteModernObjC::SynthMsgSendStretCallExpr(FunctionDecl *MsgSendStretFla returnType, nullptr, /*BitWidth=*/nullptr, /*Mutable=*/true, ICIS_NoInit); - MemberExpr *ME = new (Context) MemberExpr(STCE, false, FieldD, SourceLocation(), - FieldD->getType(), VK_LValue, - OK_Ordinary); + MemberExpr *ME = new (Context) + MemberExpr(STCE, false, SourceLocation(), FieldD, SourceLocation(), + FieldD->getType(), VK_LValue, OK_Ordinary); return ME; } @@ -4732,11 +4726,10 @@ Stmt *RewriteModernObjC::SynthesizeBlockCall(CallExpr *Exp, const Expr *BlockExp Context->VoidPtrTy, nullptr, /*BitWidth=*/nullptr, /*Mutable=*/true, ICIS_NoInit); - MemberExpr *ME = new (Context) MemberExpr(PE, true, FD, SourceLocation(), - FD->getType(), VK_LValue, - OK_Ordinary); + MemberExpr *ME = + new (Context) MemberExpr(PE, true, SourceLocation(), FD, SourceLocation(), + FD->getType(), VK_LValue, OK_Ordinary); - CastExpr *FunkCast = NoTypeInfoCStyleCastExpr(Context, PtrToFuncCastType, CK_BitCast, ME); PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(), FunkCast); @@ -4781,10 +4774,9 @@ Stmt *RewriteModernObjC::RewriteBlockDeclRefExpr(DeclRefExpr *DeclRefExp) { Context->VoidPtrTy, nullptr, /*BitWidth=*/nullptr, /*Mutable=*/true, ICIS_NoInit); - MemberExpr *ME = new (Context) MemberExpr(DeclRefExp, isArrow, - FD, SourceLocation(), - FD->getType(), VK_LValue, - OK_Ordinary); + MemberExpr *ME = new (Context) + MemberExpr(DeclRefExp, isArrow, SourceLocation(), FD, SourceLocation(), + FD->getType(), VK_LValue, OK_Ordinary); StringRef Name = VD->getName(); FD = FieldDecl::Create(*Context, nullptr, SourceLocation(), SourceLocation(), @@ -4792,11 +4784,10 @@ Stmt *RewriteModernObjC::RewriteBlockDeclRefExpr(DeclRefExpr *DeclRefExp) { Context->VoidPtrTy, nullptr, /*BitWidth=*/nullptr, /*Mutable=*/true, ICIS_NoInit); - ME = new (Context) MemberExpr(ME, true, FD, SourceLocation(), - DeclRefExp->getType(), VK_LValue, OK_Ordinary); - - - + ME = + new (Context) MemberExpr(ME, true, SourceLocation(), FD, SourceLocation(), + DeclRefExp->getType(), VK_LValue, OK_Ordinary); + // Need parens to enforce 
precedence. ParenExpr *PE = new (Context) ParenExpr(DeclRefExp->getExprLoc(), DeclRefExp->getExprLoc(), @@ -7694,9 +7685,9 @@ Stmt *RewriteModernObjC::RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV) { IvarT, nullptr, /*BitWidth=*/nullptr, /*Mutable=*/true, ICIS_NoInit); - MemberExpr *ME = new (Context) MemberExpr(PE, true, FD, SourceLocation(), - FD->getType(), VK_LValue, - OK_Ordinary); + MemberExpr *ME = new (Context) + MemberExpr(PE, true, SourceLocation(), FD, SourceLocation(), + FD->getType(), VK_LValue, OK_Ordinary); IvarT = Context->getDecltypeType(ME, ME->getType()); } } @@ -7723,9 +7714,9 @@ Stmt *RewriteModernObjC::RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV) { D->getType(), nullptr, /*BitWidth=*/D->getBitWidth(), /*Mutable=*/true, ICIS_NoInit); - MemberExpr *ME = new (Context) MemberExpr(PE, /*isArrow*/false, FD, SourceLocation(), - FD->getType(), VK_LValue, - OK_Ordinary); + MemberExpr *ME = new (Context) + MemberExpr(PE, /*isArrow*/ false, SourceLocation(), FD, + SourceLocation(), FD->getType(), VK_LValue, OK_Ordinary); Replacement = ME; } diff --git a/contrib/llvm/tools/clang/lib/Frontend/Rewrite/RewriteObjC.cpp b/contrib/llvm/tools/clang/lib/Frontend/Rewrite/RewriteObjC.cpp index 5196810..b2a45b4 100644 --- a/contrib/llvm/tools/clang/lib/Frontend/Rewrite/RewriteObjC.cpp +++ b/contrib/llvm/tools/clang/lib/Frontend/Rewrite/RewriteObjC.cpp @@ -193,7 +193,7 @@ namespace { DiagnosticsEngine &D, const LangOptions &LOpts, bool silenceMacroWarn); - ~RewriteObjC() {} + ~RewriteObjC() override {} void HandleTranslationUnit(ASTContext &C) override; @@ -511,8 +511,8 @@ namespace { bool silenceMacroWarn) : RewriteObjC(inFile, OS, D, LOpts, silenceMacroWarn) {} - - ~RewriteObjCFragileABI() {} + + ~RewriteObjCFragileABI() override {} void Initialize(ASTContext &context) override; // Rewriting metadata @@ -3821,11 +3821,10 @@ Stmt *RewriteObjC::SynthesizeBlockCall(CallExpr *Exp, const Expr *BlockExp) { Context->VoidPtrTy, nullptr, /*BitWidth=*/nullptr, /*Mutable=*/true, ICIS_NoInit); - MemberExpr *ME = new (Context) MemberExpr(PE, true, FD, SourceLocation(), - FD->getType(), VK_LValue, - OK_Ordinary); + MemberExpr *ME = + new (Context) MemberExpr(PE, true, SourceLocation(), FD, SourceLocation(), + FD->getType(), VK_LValue, OK_Ordinary); - CastExpr *FunkCast = NoTypeInfoCStyleCastExpr(Context, PtrToFuncCastType, CK_BitCast, ME); PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(), FunkCast); @@ -3870,10 +3869,9 @@ Stmt *RewriteObjC::RewriteBlockDeclRefExpr(DeclRefExpr *DeclRefExp) { Context->VoidPtrTy, nullptr, /*BitWidth=*/nullptr, /*Mutable=*/true, ICIS_NoInit); - MemberExpr *ME = new (Context) MemberExpr(DeclRefExp, isArrow, - FD, SourceLocation(), - FD->getType(), VK_LValue, - OK_Ordinary); + MemberExpr *ME = new (Context) + MemberExpr(DeclRefExp, isArrow, SourceLocation(), FD, SourceLocation(), + FD->getType(), VK_LValue, OK_Ordinary); StringRef Name = VD->getName(); FD = FieldDecl::Create(*Context, nullptr, SourceLocation(), SourceLocation(), @@ -3881,11 +3879,10 @@ Stmt *RewriteObjC::RewriteBlockDeclRefExpr(DeclRefExpr *DeclRefExp) { Context->VoidPtrTy, nullptr, /*BitWidth=*/nullptr, /*Mutable=*/true, ICIS_NoInit); - ME = new (Context) MemberExpr(ME, true, FD, SourceLocation(), - DeclRefExp->getType(), VK_LValue, OK_Ordinary); - - - + ME = + new (Context) MemberExpr(ME, true, SourceLocation(), FD, SourceLocation(), + DeclRefExp->getType(), VK_LValue, OK_Ordinary); + // Need parens to enforce precedence. 
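Every RewriteModernObjC/RewriteObjC hunk in these files is the same mechanical update: the MemberExpr constructor gained a SourceLocation for the '.' or '->' operator, inserted ahead of the member declaration. Reconstructing the two signatures from the call sites (hedged; Expr.h of this revision is authoritative), with Base, FD, and Context standing in for the locals used above:

// Old, as the removed lines call it:
//   MemberExpr(Expr *Base, bool IsArrow, ValueDecl *Member,
//              SourceLocation MemberLoc, QualType T,
//              ExprValueKind VK, ExprObjectKind OK)
// New, as the added lines call it:
//   MemberExpr(Expr *Base, bool IsArrow, SourceLocation OperatorLoc,
//              ValueDecl *Member, SourceLocation MemberLoc, QualType T,
//              ExprValueKind VK, ExprObjectKind OK)
// The rewriters synthesize AST nodes with no backing source text, so they
// pass an invalid SourceLocation() for the new operator location:
MemberExpr *ME = new (Context)
    MemberExpr(Base, /*IsArrow=*/true, SourceLocation(), FD, SourceLocation(),
               FD->getType(), VK_LValue, OK_Ordinary);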
ParenExpr *PE = new (Context) ParenExpr(DeclRefExp->getExprLoc(), DeclRefExp->getExprLoc(), @@ -5880,10 +5877,9 @@ Stmt *RewriteObjCFragileABI::RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV) { castExpr); if (IV->isFreeIvar() && declaresSameEntity(CurMethodDef->getClassInterface(), iFaceDecl->getDecl())) { - MemberExpr *ME = new (Context) MemberExpr(PE, true, D, - IV->getLocation(), - D->getType(), - VK_LValue, OK_Ordinary); + MemberExpr *ME = new (Context) + MemberExpr(PE, true, SourceLocation(), D, IV->getLocation(), + D->getType(), VK_LValue, OK_Ordinary); Replacement = ME; } else { IV->setBase(PE); diff --git a/contrib/llvm/tools/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp b/contrib/llvm/tools/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp index f701f72..d31b12e 100644 --- a/contrib/llvm/tools/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp +++ b/contrib/llvm/tools/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp @@ -20,6 +20,7 @@ #include "clang/Frontend/TextDiagnosticPrinter.h" #include "clang/Lex/Lexer.h" #include "llvm/ADT/DenseSet.h" +#include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallString.h" #include "llvm/ADT/StringRef.h" #include "llvm/Support/raw_ostream.h" @@ -60,8 +61,8 @@ public: DiagnosticOptions *DiagOpts) : DiagnosticNoteRenderer(LangOpts, DiagOpts), Writer(Writer) {} - virtual ~SDiagsRenderer() {} - + ~SDiagsRenderer() override {} + protected: void emitDiagnosticMessage(SourceLocation Loc, PresumedLoc PLoc, @@ -157,7 +158,7 @@ public: EmitPreamble(); } - ~SDiagsWriter() {} + ~SDiagsWriter() override {} void HandleDiagnostic(DiagnosticsEngine::Level DiagLevel, const Diagnostic &Info) override; @@ -631,7 +632,7 @@ void SDiagsWriter::HandleDiagnostic(DiagnosticsEngine::Level DiagLevel, "Unexpected diagnostic with valid location outside of a source file"); SDiagsRenderer Renderer(*this, *LangOpts, &*State->DiagOpts); Renderer.emitDiagnostic(Info.getLocation(), DiagLevel, - State->diagBuf.str(), + State->diagBuf, Info.getRanges(), Info.getFixItHints(), &Info.getSourceManager(), diff --git a/contrib/llvm/tools/clang/lib/Frontend/TextDiagnostic.cpp b/contrib/llvm/tools/clang/lib/Frontend/TextDiagnostic.cpp index bbc9914..aaf17a9 100644 --- a/contrib/llvm/tools/clang/lib/Frontend/TextDiagnostic.cpp +++ b/contrib/llvm/tools/clang/lib/Frontend/TextDiagnostic.cpp @@ -799,18 +799,18 @@ void TextDiagnostic::emitDiagnosticLoc(SourceLocation Loc, PresumedLoc PLoc, OS << PLoc.getFilename(); switch (DiagOpts->getFormat()) { case DiagnosticOptions::Clang: OS << ':' << LineNo; break; - case DiagnosticOptions::Msvc: OS << '(' << LineNo; break; + case DiagnosticOptions::MSVC: OS << '(' << LineNo; break; case DiagnosticOptions::Vi: OS << " +" << LineNo; break; } if (DiagOpts->ShowColumn) // Compute the column number. 
if (unsigned ColNo = PLoc.getColumn()) { - if (DiagOpts->getFormat() == DiagnosticOptions::Msvc) { + if (DiagOpts->getFormat() == DiagnosticOptions::MSVC) { OS << ','; // Visual Studio 2010 or earlier expects column number to be off by one if (LangOpts.MSCompatibilityVersion && - LangOpts.MSCompatibilityVersion < 170000000) + !LangOpts.isCompatibleWithMSVC(LangOptions::MSVC2012)) ColNo--; } else OS << ':'; @@ -819,7 +819,7 @@ void TextDiagnostic::emitDiagnosticLoc(SourceLocation Loc, PresumedLoc PLoc, switch (DiagOpts->getFormat()) { case DiagnosticOptions::Clang: case DiagnosticOptions::Vi: OS << ':'; break; - case DiagnosticOptions::Msvc: OS << ") : "; break; + case DiagnosticOptions::MSVC: OS << ") : "; break; } if (DiagOpts->ShowSourceRanges && !Ranges.empty()) { diff --git a/contrib/llvm/tools/clang/lib/Frontend/VerifyDiagnosticConsumer.cpp b/contrib/llvm/tools/clang/lib/Frontend/VerifyDiagnosticConsumer.cpp index 3ff6b18..910e394 100644 --- a/contrib/llvm/tools/clang/lib/Frontend/VerifyDiagnosticConsumer.cpp +++ b/contrib/llvm/tools/clang/lib/Frontend/VerifyDiagnosticConsumer.cpp @@ -58,9 +58,9 @@ public: /// \brief Hook into the preprocessor and update the list of parsed /// files when the preprocessor indicates a new file is entered. - virtual void FileChanged(SourceLocation Loc, FileChangeReason Reason, - SrcMgr::CharacteristicKind FileType, - FileID PrevFID) { + void FileChanged(SourceLocation Loc, FileChangeReason Reason, + SrcMgr::CharacteristicKind FileType, + FileID PrevFID) override { Verify.UpdateParsedFileStatus(SM, SM.getFileID(Loc), VerifyDiagnosticConsumer::IsParsed); } diff --git a/contrib/llvm/tools/clang/lib/Headers/Intrin.h b/contrib/llvm/tools/clang/lib/Headers/Intrin.h index 84bc430..727a55e 100644 --- a/contrib/llvm/tools/clang/lib/Headers/Intrin.h +++ b/contrib/llvm/tools/clang/lib/Headers/Intrin.h @@ -289,6 +289,7 @@ void _WriteBarrier(void); unsigned __int32 xbegin(void); void _xend(void); static __inline__ +#define _XCR_XFEATURE_ENABLED_MASK 0 unsigned __int64 __cdecl _xgetbv(unsigned int); void __cdecl _xrstor(void const *, unsigned __int64); void __cdecl _xsave(void *, unsigned __int64); @@ -780,17 +781,17 @@ _InterlockedCompareExchange64(__int64 volatile *_Destination, \*----------------------------------------------------------------------------*/ #if defined(__i386__) || defined(__x86_64__) static __inline__ void __attribute__((__always_inline__, __nodebug__)) -__attribute__((deprecated("use other intrinsics or C++11 atomics instead"))) +__attribute__((__deprecated__("use other intrinsics or C++11 atomics instead"))) _ReadWriteBarrier(void) { __asm__ volatile ("" : : : "memory"); } static __inline__ void __attribute__((__always_inline__, __nodebug__)) -__attribute__((deprecated("use other intrinsics or C++11 atomics instead"))) +__attribute__((__deprecated__("use other intrinsics or C++11 atomics instead"))) _ReadBarrier(void) { __asm__ volatile ("" : : : "memory"); } static __inline__ void __attribute__((__always_inline__, __nodebug__)) -__attribute__((deprecated("use other intrinsics or C++11 atomics instead"))) +__attribute__((__deprecated__("use other intrinsics or C++11 atomics instead"))) _WriteBarrier(void) { __asm__ volatile ("" : : : "memory"); } @@ -943,14 +944,14 @@ __readmsr(unsigned long __register) { return (((unsigned __int64)__edx) << 32) | (unsigned __int64)__eax; } -static __inline__ unsigned long __attribute__((always_inline, __nodebug__)) +static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__)) __readcr3(void) 
{ unsigned long __cr3_val; __asm__ __volatile__ ("mov %%cr3, %0" : "=q"(__cr3_val) : : "memory"); return __cr3_val; } -static __inline__ void __attribute__((always_inline, __nodebug__)) +static __inline__ void __attribute__((__always_inline__, __nodebug__)) __writecr3(unsigned int __cr3_val) { __asm__ ("mov %0, %%cr3" : : "q"(__cr3_val) : "memory"); } diff --git a/contrib/llvm/tools/clang/lib/Headers/__stddef_max_align_t.h b/contrib/llvm/tools/clang/lib/Headers/__stddef_max_align_t.h index a06f412..1e10ca9 100644 --- a/contrib/llvm/tools/clang/lib/Headers/__stddef_max_align_t.h +++ b/contrib/llvm/tools/clang/lib/Headers/__stddef_max_align_t.h @@ -26,15 +26,18 @@ #ifndef __CLANG_MAX_ALIGN_T_DEFINED #define __CLANG_MAX_ALIGN_T_DEFINED -#ifndef _MSC_VER +#if defined(_MSC_VER) +typedef double max_align_t; +#elif defined(__APPLE__) +typedef long double max_align_t; +#else +// Define 'max_align_t' to match the GCC definition. typedef struct { long long __clang_max_align_nonce1 __attribute__((__aligned__(__alignof__(long long)))); long double __clang_max_align_nonce2 __attribute__((__aligned__(__alignof__(long double)))); } max_align_t; -#else -typedef double max_align_t; #endif #endif diff --git a/contrib/llvm/tools/clang/lib/Headers/altivec.h b/contrib/llvm/tools/clang/lib/Headers/altivec.h index 0ac0841..1f8c831 100644 --- a/contrib/llvm/tools/clang/lib/Headers/altivec.h +++ b/contrib/llvm/tools/clang/lib/Headers/altivec.h @@ -73,6 +73,18 @@ vec_perm(vector bool int __a, vector bool int __b, vector unsigned char __c); static vector float __ATTRS_o_ai vec_perm(vector float __a, vector float __b, vector unsigned char __c); +#ifdef __VSX__ +static vector long long __ATTRS_o_ai +vec_perm(vector long long __a, vector long long __b, vector unsigned char __c); + +static vector unsigned long long __ATTRS_o_ai +vec_perm(vector unsigned long long __a, vector unsigned long long __b, + vector unsigned char __c); + +static vector double __ATTRS_o_ai +vec_perm(vector double __a, vector double __b, vector unsigned char __c); +#endif + static vector unsigned char __ATTRS_o_ai vec_xor(vector unsigned char __a, vector unsigned char __b); @@ -245,6 +257,20 @@ vec_add(vector unsigned int __a, vector bool int __b) return __a + (vector unsigned int)__b; } +#if defined(__POWER8_VECTOR__) && defined(__powerpc64__) +static vector signed __int128 __ATTRS_o_ai +vec_add(vector signed __int128 __a, vector signed __int128 __b) +{ + return __a + __b; +} + +static vector unsigned __int128 __ATTRS_o_ai +vec_add(vector unsigned __int128 __a, vector unsigned __int128 __b) +{ + return __a + __b; +} +#endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__) + static vector float __ATTRS_o_ai vec_add(vector float __a, vector float __b) { @@ -383,12 +409,24 @@ vec_vaddfp(vector float __a, vector float __b) /* vec_addc */ -static vector unsigned int __attribute__((__always_inline__)) +static vector unsigned int __ATTRS_o_ai vec_addc(vector unsigned int __a, vector unsigned int __b) { return __builtin_altivec_vaddcuw(__a, __b); } +#if defined(__POWER8_VECTOR__) && defined(__powerpc64__) +static vector signed __int128 __ATTRS_o_ai +vec_addc(vector signed __int128 __a, vector signed __int128 __b) { + return __builtin_altivec_vaddcuq(__a, __b); +} + +static vector unsigned __int128 __ATTRS_o_ai +vec_addc(vector unsigned __int128 __a, vector unsigned __int128 __b) { + return __builtin_altivec_vaddcuq(__a, __b); +} +#endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__) + /* vec_vaddcuw */ static vector unsigned int 
__attribute__((__always_inline__)) @@ -627,6 +665,64 @@ vec_vadduws(vector unsigned int __a, vector bool int __b) return __builtin_altivec_vadduws(__a, (vector unsigned int)__b); } +#if defined(__POWER8_VECTOR__) && defined(__powerpc64__) +/* vec_vadduqm */ + +static vector signed __int128 __ATTRS_o_ai +vec_vadduqm(vector signed __int128 __a, vector signed __int128 __b) +{ + return __a + __b; +} + +static vector unsigned __int128 __ATTRS_o_ai +vec_vadduqm(vector unsigned __int128 __a, vector unsigned __int128 __b) +{ + return __a + __b; +} + +/* vec_vaddeuqm */ + +static vector signed __int128 __ATTRS_o_ai +vec_vaddeuqm(vector signed __int128 __a, vector signed __int128 __b, + vector signed __int128 __c) { + return __builtin_altivec_vaddeuqm(__a, __b, __c); +} + +static vector unsigned __int128 __ATTRS_o_ai +vec_vaddeuqm(vector unsigned __int128 __a, vector unsigned __int128 __b, + vector unsigned __int128 __c) { + return __builtin_altivec_vaddeuqm(__a, __b, __c); +} + +/* vec_vaddcuq */ + +static vector signed __int128 __ATTRS_o_ai +vec_vaddcuq(vector signed __int128 __a, vector signed __int128 __b) +{ + return __builtin_altivec_vaddcuq(__a, __b); +} + +static vector unsigned __int128 __ATTRS_o_ai +vec_vaddcuq(vector unsigned __int128 __a, vector unsigned __int128 __b) +{ + return __builtin_altivec_vaddcuq(__a, __b); +} + +/* vec_vaddecuq */ + +static vector signed __int128 __ATTRS_o_ai +vec_vaddecuq(vector signed __int128 __a, vector signed __int128 __b, + vector signed __int128 __c) { + return __builtin_altivec_vaddecuq(__a, __b, __c); +} + +static vector unsigned __int128 __ATTRS_o_ai +vec_vaddecuq(vector unsigned __int128 __a, vector unsigned __int128 __b, + vector unsigned __int128 __c) { + return __builtin_altivec_vaddecuq(__a, __b, __c); +} +#endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__) + /* vec_and */ #define __builtin_altivec_vand vec_and @@ -1387,6 +1483,21 @@ vec_cmpeq(vector unsigned int __a, vector unsigned int __b) __builtin_altivec_vcmpequw((vector int)__a, (vector int)__b); } +#ifdef __POWER8_VECTOR__ +static vector bool long long __ATTRS_o_ai +vec_cmpeq(vector signed long long __a, vector signed long long __b) +{ + return (vector bool long long) __builtin_altivec_vcmpequd(__a, __b); +} + +static vector bool long long __ATTRS_o_ai +vec_cmpeq(vector unsigned long long __a, vector unsigned long long __b) +{ + return (vector bool long long) + __builtin_altivec_vcmpequd((vector long long)__a, (vector long long) __b); +} +#endif + static vector bool int __ATTRS_o_ai vec_cmpeq(vector float __a, vector float __b) { @@ -1447,6 +1558,20 @@ vec_cmpgt(vector unsigned int __a, vector unsigned int __b) return (vector bool int)__builtin_altivec_vcmpgtuw(__a, __b); } +#ifdef __POWER8_VECTOR__ +static vector bool long long __ATTRS_o_ai +vec_cmpgt(vector signed long long __a, vector signed long long __b) +{ + return (vector bool long long)__builtin_altivec_vcmpgtsd(__a, __b); +} + +static vector bool long long __ATTRS_o_ai +vec_cmpgt(vector unsigned long long __a, vector unsigned long long __b) +{ + return (vector bool long long)__builtin_altivec_vcmpgtud(__a, __b); +} +#endif + static vector bool int __ATTRS_o_ai vec_cmpgt(vector float __a, vector float __b) { @@ -2270,7 +2395,7 @@ vec_vlogefp(vector float __a) #ifdef __LITTLE_ENDIAN__ static vector unsigned char __ATTRS_o_ai -__attribute__((deprecated("use assignment for unaligned little endian \ +__attribute__((__deprecated__("use assignment for unaligned little endian \ loads/stores"))) vec_lvsl(int __a, const signed 
char *__b) { @@ -2289,7 +2414,7 @@ vec_lvsl(int __a, const signed char *__b) #ifdef __LITTLE_ENDIAN__ static vector unsigned char __ATTRS_o_ai -__attribute__((deprecated("use assignment for unaligned little endian \ +__attribute__((__deprecated__("use assignment for unaligned little endian \ loads/stores"))) vec_lvsl(int __a, const unsigned char *__b) { @@ -2308,7 +2433,7 @@ vec_lvsl(int __a, const unsigned char *__b) #ifdef __LITTLE_ENDIAN__ static vector unsigned char __ATTRS_o_ai -__attribute__((deprecated("use assignment for unaligned little endian \ +__attribute__((__deprecated__("use assignment for unaligned little endian \ loads/stores"))) vec_lvsl(int __a, const short *__b) { @@ -2327,7 +2452,7 @@ vec_lvsl(int __a, const short *__b) #ifdef __LITTLE_ENDIAN__ static vector unsigned char __ATTRS_o_ai -__attribute__((deprecated("use assignment for unaligned little endian \ +__attribute__((__deprecated__("use assignment for unaligned little endian \ loads/stores"))) vec_lvsl(int __a, const unsigned short *__b) { @@ -2346,7 +2471,7 @@ vec_lvsl(int __a, const unsigned short *__b) #ifdef __LITTLE_ENDIAN__ static vector unsigned char __ATTRS_o_ai -__attribute__((deprecated("use assignment for unaligned little endian \ +__attribute__((__deprecated__("use assignment for unaligned little endian \ loads/stores"))) vec_lvsl(int __a, const int *__b) { @@ -2365,7 +2490,7 @@ vec_lvsl(int __a, const int *__b) #ifdef __LITTLE_ENDIAN__ static vector unsigned char __ATTRS_o_ai -__attribute__((deprecated("use assignment for unaligned little endian \ +__attribute__((__deprecated__("use assignment for unaligned little endian \ loads/stores"))) vec_lvsl(int __a, const unsigned int *__b) { @@ -2384,7 +2509,7 @@ vec_lvsl(int __a, const unsigned int *__b) #ifdef __LITTLE_ENDIAN__ static vector unsigned char __ATTRS_o_ai -__attribute__((deprecated("use assignment for unaligned little endian \ +__attribute__((__deprecated__("use assignment for unaligned little endian \ loads/stores"))) vec_lvsl(int __a, const float *__b) { @@ -2405,7 +2530,7 @@ vec_lvsl(int __a, const float *__b) #ifdef __LITTLE_ENDIAN__ static vector unsigned char __ATTRS_o_ai -__attribute__((deprecated("use assignment for unaligned little endian \ +__attribute__((__deprecated__("use assignment for unaligned little endian \ loads/stores"))) vec_lvsr(int __a, const signed char *__b) { @@ -2424,7 +2549,7 @@ vec_lvsr(int __a, const signed char *__b) #ifdef __LITTLE_ENDIAN__ static vector unsigned char __ATTRS_o_ai -__attribute__((deprecated("use assignment for unaligned little endian \ +__attribute__((__deprecated__("use assignment for unaligned little endian \ loads/stores"))) vec_lvsr(int __a, const unsigned char *__b) { @@ -2443,7 +2568,7 @@ vec_lvsr(int __a, const unsigned char *__b) #ifdef __LITTLE_ENDIAN__ static vector unsigned char __ATTRS_o_ai -__attribute__((deprecated("use assignment for unaligned little endian \ +__attribute__((__deprecated__("use assignment for unaligned little endian \ loads/stores"))) vec_lvsr(int __a, const short *__b) { @@ -2462,7 +2587,7 @@ vec_lvsr(int __a, const short *__b) #ifdef __LITTLE_ENDIAN__ static vector unsigned char __ATTRS_o_ai -__attribute__((deprecated("use assignment for unaligned little endian \ +__attribute__((__deprecated__("use assignment for unaligned little endian \ loads/stores"))) vec_lvsr(int __a, const unsigned short *__b) { @@ -2481,7 +2606,7 @@ vec_lvsr(int __a, const unsigned short *__b) #ifdef __LITTLE_ENDIAN__ static vector unsigned char __ATTRS_o_ai -__attribute__((deprecated("use 
assignment for unaligned little endian \ +__attribute__((__deprecated__("use assignment for unaligned little endian \ loads/stores"))) vec_lvsr(int __a, const int *__b) { @@ -2500,7 +2625,7 @@ vec_lvsr(int __a, const int *__b) #ifdef __LITTLE_ENDIAN__ static vector unsigned char __ATTRS_o_ai -__attribute__((deprecated("use assignment for unaligned little endian \ +__attribute__((__deprecated__("use assignment for unaligned little endian \ loads/stores"))) vec_lvsr(int __a, const unsigned int *__b) { @@ -2519,7 +2644,7 @@ vec_lvsr(int __a, const unsigned int *__b) #ifdef __LITTLE_ENDIAN__ static vector unsigned char __ATTRS_o_ai -__attribute__((deprecated("use assignment for unaligned little endian \ +__attribute__((__deprecated__("use assignment for unaligned little endian \ loads/stores"))) vec_lvsr(int __a, const float *__b) { @@ -2679,6 +2804,20 @@ vec_max(vector unsigned int __a, vector bool int __b) return __builtin_altivec_vmaxuw(__a, (vector unsigned int)__b); } +#ifdef __POWER8_VECTOR__ +static vector signed long long __ATTRS_o_ai +vec_max(vector signed long long __a, vector signed long long __b) +{ + return __builtin_altivec_vmaxsd(__a, __b); +} + +static vector unsigned long long __ATTRS_o_ai +vec_max(vector unsigned long long __a, vector unsigned long long __b) +{ + return __builtin_altivec_vmaxud(__a, __b); +} +#endif + static vector float __ATTRS_o_ai vec_max(vector float __a, vector float __b) { @@ -3327,6 +3466,20 @@ vec_min(vector unsigned int __a, vector bool int __b) return __builtin_altivec_vminuw(__a, (vector unsigned int)__b); } +#ifdef __POWER8_VECTOR__ +static vector signed long long __ATTRS_o_ai +vec_min(vector signed long long __a, vector signed long long __b) +{ + return __builtin_altivec_vminsd(__a, __b); +} + +static vector unsigned long long __ATTRS_o_ai +vec_min(vector unsigned long long __a, vector unsigned long long __b) +{ + return __builtin_altivec_vminud(__a, __b); +} +#endif + static vector float __ATTRS_o_ai vec_min(vector float __a, vector float __b) { @@ -3762,6 +3915,28 @@ vec_mule(vector unsigned short __a, vector unsigned short __b) #endif } +#ifdef __POWER8_VECTOR__ +static vector signed long long __ATTRS_o_ai +vec_mule(vector signed int __a, vector signed int __b) +{ +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vmulosw(__a, __b); +#else + return __builtin_altivec_vmulesw(__a, __b); +#endif +} + +static vector unsigned long long __ATTRS_o_ai +vec_mule(vector unsigned int __a, vector unsigned int __b) +{ +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vmulouw(__a, __b); +#else + return __builtin_altivec_vmuleuw(__a, __b); +#endif +} +#endif + /* vec_vmulesb */ static vector short __attribute__((__always_inline__)) @@ -3852,6 +4027,28 @@ vec_mulo(vector unsigned short __a, vector unsigned short __b) #endif } +#ifdef __POWER8_VECTOR__ +static vector signed long long __ATTRS_o_ai +vec_mulo(vector signed int __a, vector signed int __b) +{ +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vmulesw(__a, __b); +#else + return __builtin_altivec_vmulosw(__a, __b); +#endif +} + +static vector unsigned long long __ATTRS_o_ai +vec_mulo(vector unsigned int __a, vector unsigned int __b) +{ +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vmuleuw(__a, __b); +#else + return __builtin_altivec_vmulouw(__a, __b); +#endif +} +#endif + /* vec_vmulosb */ static vector short __attribute__((__always_inline__)) @@ -4525,6 +4722,58 @@ vec_vpkuwum(vector bool int __a, vector bool int __b) #endif } +/* vec_vpkudum */ + +#ifdef __POWER8_VECTOR__ +#define 
__builtin_altivec_vpkudum vec_vpkudum + +static vector int __ATTRS_o_ai +vec_vpkudum(vector long long __a, vector long long __b) +{ +#ifdef __LITTLE_ENDIAN__ + return (vector int)vec_perm(__a, __b, (vector unsigned char) + (0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0A, 0x0B, + 0x10, 0x11, 0x12, 0x13, 0x18, 0x19, 0x1A, 0x1B)); +#else + return (vector int)vec_perm(__a, __b, (vector unsigned char) + (0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D, 0x0E, 0x0F, + 0x14, 0x15, 0x16, 0x17, 0x1C, 0x1D, 0x1E, 0x1F)); +#endif +} + +static vector unsigned int __ATTRS_o_ai +vec_vpkudum(vector unsigned long long __a, vector unsigned long long __b) +{ +#ifdef __LITTLE_ENDIAN__ + return (vector unsigned int)vec_perm(__a, __b, (vector unsigned char) + (0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0A, 0x0B, + 0x10, 0x11, 0x12, 0x13, 0x18, 0x19, 0x1A, 0x1B)); +#else + return (vector unsigned int)vec_perm(__a, __b, (vector unsigned char) + (0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D, 0x0E, 0x0F, + 0x14, 0x15, 0x16, 0x17, 0x1C, 0x1D, 0x1E, 0x1F)); +#endif +} + +static vector bool int __ATTRS_o_ai +vec_vpkudum(vector bool long long __a, vector bool long long __b) +{ +#ifdef __LITTLE_ENDIAN__ + return (vector bool int)vec_perm((vector long long)__a, + (vector long long)__b, + (vector unsigned char) + (0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0A, 0x0B, + 0x10, 0x11, 0x12, 0x13, 0x18, 0x19, 0x1A, 0x1B)); +#else + return (vector bool int)vec_perm((vector long long)__a, + (vector long long)__b, + (vector unsigned char) + (0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D, 0x0E, 0x0F, + 0x14, 0x15, 0x16, 0x17, 0x1C, 0x1D, 0x1E, 0x1F)); +#endif +} +#endif + /* vec_packpx */ static vector pixel __attribute__((__always_inline__)) @@ -4591,6 +4840,28 @@ vec_packs(vector unsigned int __a, vector unsigned int __b) #endif } +#ifdef __POWER8_VECTOR__ +static vector int __ATTRS_o_ai +vec_packs(vector long long __a, vector long long __b) +{ +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vpksdss(__b, __a); +#else + return __builtin_altivec_vpksdss(__a, __b); +#endif +} + +static vector unsigned int __ATTRS_o_ai +vec_packs(vector unsigned long long __a, vector unsigned long long __b) +{ +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vpkudus(__b, __a); +#else + return __builtin_altivec_vpkudus(__a, __b); +#endif +} +#endif + /* vec_vpkshss */ static vector signed char __attribute__((__always_inline__)) @@ -4603,6 +4874,20 @@ vec_vpkshss(vector short __a, vector short __b) #endif } +/* vec_vpksdss */ + +#ifdef __POWER8_VECTOR__ +static vector int __ATTRS_o_ai +vec_vpksdss(vector long long __a, vector long long __b) +{ +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vpksdss(__b, __a); +#else + return __builtin_altivec_vpksdss(__a, __b); +#endif +} +#endif + /* vec_vpkuhus */ static vector unsigned char __attribute__((__always_inline__)) @@ -4615,6 +4900,20 @@ vec_vpkuhus(vector unsigned short __a, vector unsigned short __b) #endif } +/* vec_vpkudus */ + +#ifdef __POWER8_VECTOR__ +static vector unsigned int __attribute__((__always_inline__)) +vec_vpkudus(vector unsigned long long __a, vector unsigned long long __b) +{ +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vpkudus(__b, __a); +#else + return __builtin_altivec_vpkudus(__a, __b); +#endif +} +#endif + /* vec_vpkswss */ static vector signed short __attribute__((__always_inline__)) @@ -4681,6 +4980,28 @@ vec_packsu(vector unsigned int __a, vector unsigned int __b) #endif } +#ifdef __POWER8_VECTOR__ +static vector unsigned int __ATTRS_o_ai +vec_packsu(vector long long __a, vector long long __b) +{ +#ifdef 
__LITTLE_ENDIAN__ + return __builtin_altivec_vpksdus(__b, __a); +#else + return __builtin_altivec_vpksdus(__a, __b); +#endif +} + +static vector unsigned int __ATTRS_o_ai +vec_packsu(vector unsigned long long __a, vector unsigned long long __b) +{ +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vpkudus(__b, __a); +#else + return __builtin_altivec_vpkudus(__a, __b); +#endif +} +#endif + /* vec_vpkshus */ static vector unsigned char __ATTRS_o_ai @@ -4725,6 +5046,20 @@ vec_vpkswus(vector unsigned int __a, vector unsigned int __b) #endif } +/* vec_vpksdus */ + +#ifdef __POWER8_VECTOR__ +static vector unsigned int __ATTRS_o_ai +vec_vpksdus(vector long long __a, vector long long __b) +{ +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vpksdus(__b, __a); +#else + return __builtin_altivec_vpksdus(__a, __b); +#endif +} +#endif + /* vec_perm */ // The vperm instruction is defined architecturally with a big-endian bias. @@ -5095,6 +5430,20 @@ vec_rl(vector unsigned int __a, vector unsigned int __b) return (vector unsigned int)__builtin_altivec_vrlw((vector int)__a, __b); } +#ifdef __POWER8_VECTOR__ +static vector signed long long __ATTRS_o_ai +vec_rl(vector signed long long __a, vector unsigned long long __b) +{ + return __builtin_altivec_vrld(__a, __b); +} + +static vector unsigned long long __ATTRS_o_ai +vec_rl(vector unsigned long long __a, vector unsigned long long __b) +{ + return __builtin_altivec_vrld(__a, __b); +} +#endif + /* vec_vrlb */ static vector signed char __ATTRS_o_ai @@ -5465,6 +5814,20 @@ vec_sl(vector unsigned int __a, vector unsigned int __b) return __a << __b; } +#ifdef __POWER8_VECTOR__ +static vector signed long long __ATTRS_o_ai +vec_sl(vector signed long long __a, vector unsigned long long __b) +{ + return __a << (vector long long)__b; +} + +static vector unsigned long long __ATTRS_o_ai +vec_sl(vector unsigned long long __a, vector unsigned long long __b) +{ + return __a << __b; +} +#endif + /* vec_vslb */ #define __builtin_altivec_vslb vec_vslb @@ -6566,6 +6929,20 @@ vec_sr(vector unsigned int __a, vector unsigned int __b) return __a >> __b; } +#ifdef __POWER8_VECTOR__ +static vector signed long long __ATTRS_o_ai +vec_sr(vector signed long long __a, vector unsigned long long __b) +{ + return __a >> (vector long long)__b; +} + +static vector unsigned long long __ATTRS_o_ai +vec_sr(vector unsigned long long __a, vector unsigned long long __b) +{ + return __a >> __b; +} +#endif + /* vec_vsrb */ #define __builtin_altivec_vsrb vec_vsrb @@ -6652,6 +7029,20 @@ vec_sra(vector unsigned int __a, vector unsigned int __b) return (vector unsigned int)__builtin_altivec_vsraw((vector int)__a, __b); } +#ifdef __POWER8_VECTOR__ +static vector signed long long __ATTRS_o_ai +vec_sra(vector signed long long __a, vector unsigned long long __b) +{ + return __a >> __b; +} + +static vector unsigned long long __ATTRS_o_ai +vec_sra(vector unsigned long long __a, vector unsigned long long __b) +{ + return (vector unsigned long long) ( (vector signed long long) __a >> __b); +} +#endif + /* vec_vsrab */ static vector signed char __ATTRS_o_ai @@ -8224,6 +8615,20 @@ vec_sub(vector unsigned int __a, vector bool int __b) return __a - (vector unsigned int)__b; } +#if defined(__POWER8_VECTOR__) && defined(__powerpc64__) +static vector signed __int128 __ATTRS_o_ai +vec_sub(vector signed __int128 __a, vector signed __int128 __b) +{ + return __a - __b; +} + +static vector unsigned __int128 __ATTRS_o_ai +vec_sub(vector unsigned __int128 __a, vector unsigned __int128 __b) +{ + return __a - __b; +} 
+#endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__) + static vector float __ATTRS_o_ai vec_sub(vector float __a, vector float __b) { @@ -8362,12 +8767,26 @@ vec_vsubfp(vector float __a, vector float __b) /* vec_subc */ -static vector unsigned int __attribute__((__always_inline__)) +static vector unsigned int __ATTRS_o_ai vec_subc(vector unsigned int __a, vector unsigned int __b) { return __builtin_altivec_vsubcuw(__a, __b); } +#if defined(__POWER8_VECTOR__) && defined(__powerpc64__) +static vector unsigned __int128 __ATTRS_o_ai +vec_subc(vector unsigned __int128 __a, vector unsigned __int128 __b) +{ + return __builtin_altivec_vsubcuq(__a, __b); +} + +static vector signed __int128 __ATTRS_o_ai +vec_subc(vector signed __int128 __a, vector signed __int128 __b) +{ + return __builtin_altivec_vsubcuq(__a, __b); +} +#endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__) + /* vec_vsubcuw */ static vector unsigned int __attribute__((__always_inline__)) @@ -8606,6 +9025,68 @@ vec_vsubuws(vector unsigned int __a, vector bool int __b) return __builtin_altivec_vsubuws(__a, (vector unsigned int)__b); } +#if defined(__POWER8_VECTOR__) && defined(__powerpc64__) +/* vec_vsubuqm */ + +static vector signed __int128 __ATTRS_o_ai +vec_vsubuqm(vector signed __int128 __a, vector signed __int128 __b) +{ + return __a - __b; +} + +static vector unsigned __int128 __ATTRS_o_ai +vec_vsubuqm(vector unsigned __int128 __a, vector unsigned __int128 __b) +{ + return __a - __b; +} + +/* vec_vsubeuqm */ + +static vector signed __int128 __ATTRS_o_ai +vec_vsubeuqm(vector signed __int128 __a, vector signed __int128 __b, + vector signed __int128 __c) +{ + return __builtin_altivec_vsubeuqm(__a, __b, __c); +} + +static vector unsigned __int128 __ATTRS_o_ai +vec_vsubeuqm(vector unsigned __int128 __a, vector unsigned __int128 __b, + vector unsigned __int128 __c) +{ + return __builtin_altivec_vsubeuqm(__a, __b, __c); +} + +/* vec_vsubcuq */ + +static vector signed __int128 __ATTRS_o_ai +vec_vsubcuq(vector signed __int128 __a, vector signed __int128 __b) +{ + return __builtin_altivec_vsubcuq(__a, __b); +} + +static vector unsigned __int128 __ATTRS_o_ai +vec_vsubcuq(vector unsigned __int128 __a, vector unsigned __int128 __b) +{ + return __builtin_altivec_vsubcuq(__a, __b); +} + +/* vec_vsubecuq */ + +static vector signed __int128 __ATTRS_o_ai +vec_vsubecuq(vector signed __int128 __a, vector signed __int128 __b, + vector signed __int128 __c) +{ + return __builtin_altivec_vsubecuq(__a, __b, __c); +} + +static vector unsigned __int128 __ATTRS_o_ai +vec_vsubecuq(vector unsigned __int128 __a, vector unsigned __int128 __b, + vector unsigned __int128 __c) +{ + return __builtin_altivec_vsubecuq(__a, __b, __c); +} +#endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__) + /* vec_sum4s */ static vector int __ATTRS_o_ai @@ -8797,6 +9278,28 @@ vec_unpackh(vector pixel __a) #endif } +#ifdef __POWER8_VECTOR__ +static vector long long __ATTRS_o_ai +vec_unpackh(vector int __a) +{ +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vupklsw(__a); +#else + return __builtin_altivec_vupkhsw(__a); +#endif +} + +static vector bool long long __ATTRS_o_ai +vec_unpackh(vector bool int __a) +{ +#ifdef __LITTLE_ENDIAN__ + return (vector bool long long)__builtin_altivec_vupklsw((vector int)__a); +#else + return (vector bool long long)__builtin_altivec_vupkhsw((vector int)__a); +#endif +} +#endif + /* vec_vupkhsb */ static vector short __ATTRS_o_ai @@ -8851,6 +9354,30 @@ vec_vupkhsh(vector pixel __a) #endif } +/* vec_vupkhsw */ + +#ifdef 
__POWER8_VECTOR__ +static vector long long __ATTRS_o_ai +vec_vupkhsw(vector int __a) +{ +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vupklsw(__a); +#else + return __builtin_altivec_vupkhsw(__a); +#endif +} + +static vector bool long long __ATTRS_o_ai +vec_vupkhsw(vector bool int __a) +{ +#ifdef __LITTLE_ENDIAN__ + return (vector bool long long)__builtin_altivec_vupklsw((vector int)__a); +#else + return (vector bool long long)__builtin_altivec_vupkhsw((vector int)__a); +#endif +} +#endif + /* vec_unpackl */ static vector short __ATTRS_o_ai @@ -8903,6 +9430,28 @@ vec_unpackl(vector pixel __a) #endif } +#ifdef __POWER8_VECTOR__ +static vector long long __ATTRS_o_ai +vec_unpackl(vector int __a) +{ +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vupkhsw(__a); +#else + return __builtin_altivec_vupklsw(__a); +#endif +} + +static vector bool long long __ATTRS_o_ai +vec_unpackl(vector bool int __a) +{ +#ifdef __LITTLE_ENDIAN__ + return (vector bool long long)__builtin_altivec_vupkhsw((vector int)__a); +#else + return (vector bool long long)__builtin_altivec_vupklsw((vector int)__a); +#endif +} +#endif + /* vec_vupklsb */ static vector short __ATTRS_o_ai @@ -8957,6 +9506,30 @@ vec_vupklsh(vector pixel __a) #endif } +/* vec_vupklsw */ + +#ifdef __POWER8_VECTOR__ +static vector long long __ATTRS_o_ai +vec_vupklsw(vector int __a) +{ +#ifdef __LITTLE_ENDIAN__ + return __builtin_altivec_vupkhsw(__a); +#else + return __builtin_altivec_vupklsw(__a); +#endif +} + +static vector bool long long __ATTRS_o_ai +vec_vupklsw(vector bool int __a) +{ +#ifdef __LITTLE_ENDIAN__ + return (vector bool long long)__builtin_altivec_vupkhsw((vector int)__a); +#else + return (vector bool long long)__builtin_altivec_vupklsw((vector int)__a); +#endif +} +#endif + /* vec_vsx_ld */ #ifdef __VSX__ @@ -10887,6 +11460,55 @@ vec_all_eq(vector bool int __a, vector bool int __b) return __builtin_altivec_vcmpequw_p(__CR6_LT, (vector int)__a, (vector int)__b); } +#ifdef __POWER8_VECTOR__ +static int __ATTRS_o_ai +vec_all_eq(vector signed long long __a, vector signed long long __b) +{ + return __builtin_altivec_vcmpequd_p(__CR6_LT, __a, __b); +} + +static int __ATTRS_o_ai +vec_all_eq(vector long long __a, vector bool long long __b) +{ + return __builtin_altivec_vcmpequd_p(__CR6_LT, __a, (vector long long)__b); +} + +static int __ATTRS_o_ai +vec_all_eq(vector unsigned long long __a, vector unsigned long long __b) +{ + return __builtin_altivec_vcmpequd_p(__CR6_LT, (vector long long)__a, + (vector long long)__b); +} + +static int __ATTRS_o_ai +vec_all_eq(vector unsigned long long __a, vector bool long long __b) +{ + return __builtin_altivec_vcmpequd_p(__CR6_LT, (vector long long)__a, + (vector long long)__b); +} + +static int __ATTRS_o_ai +vec_all_eq(vector bool long long __a, vector long long __b) +{ + return __builtin_altivec_vcmpequd_p(__CR6_LT, (vector long long)__a, + (vector long long)__b); +} + +static int __ATTRS_o_ai +vec_all_eq(vector bool long long __a, vector unsigned long long __b) +{ + return __builtin_altivec_vcmpequd_p(__CR6_LT, (vector long long)__a, + (vector long long)__b); +} + +static int __ATTRS_o_ai +vec_all_eq(vector bool long long __a, vector bool long long __b) +{ + return __builtin_altivec_vcmpequd_p(__CR6_LT, (vector long long)__a, + (vector long long)__b); +} +#endif + static int __ATTRS_o_ai vec_all_eq(vector float __a, vector float __b) { @@ -11033,6 +11655,56 @@ vec_all_ge(vector bool int __a, vector bool int __b) (vector unsigned int)__a); } +#ifdef __POWER8_VECTOR__ +static int __ATTRS_o_ai 
+vec_all_ge(vector signed long long __a, vector signed long long __b) +{ + return __builtin_altivec_vcmpgtsd_p(__CR6_EQ, __b, __a); +} +static int __ATTRS_o_ai +vec_all_ge(vector signed long long __a, vector bool long long __b) +{ + return __builtin_altivec_vcmpgtsd_p(__CR6_EQ, (vector signed long long)__b, + __a); +} + +static int __ATTRS_o_ai +vec_all_ge(vector unsigned long long __a, vector unsigned long long __b) +{ + return __builtin_altivec_vcmpgtud_p(__CR6_EQ, __b, __a); +} + +static int __ATTRS_o_ai +vec_all_ge(vector unsigned long long __a, vector bool long long __b) +{ + return __builtin_altivec_vcmpgtud_p(__CR6_EQ, (vector unsigned long long)__b, + __a); +} + +static int __ATTRS_o_ai +vec_all_ge(vector bool long long __a, vector signed long long __b) +{ + return __builtin_altivec_vcmpgtud_p(__CR6_EQ, + (vector unsigned long long)__b, + (vector unsigned long long)__a); +} + +static int __ATTRS_o_ai +vec_all_ge(vector bool long long __a, vector unsigned long long __b) +{ + return __builtin_altivec_vcmpgtud_p(__CR6_EQ, __b, + (vector unsigned long long)__a); +} + +static int __ATTRS_o_ai +vec_all_ge(vector bool long long __a, vector bool long long __b) +{ + return __builtin_altivec_vcmpgtud_p(__CR6_EQ, + (vector unsigned long long)__b, + (vector unsigned long long)__a); +} +#endif + static int __ATTRS_o_ai vec_all_ge(vector float __a, vector float __b) { @@ -11179,6 +11851,56 @@ vec_all_gt(vector bool int __a, vector bool int __b) (vector unsigned int)__b); } +#ifdef __POWER8_VECTOR__ +static int __ATTRS_o_ai +vec_all_gt(vector signed long long __a, vector signed long long __b) +{ + return __builtin_altivec_vcmpgtsd_p(__CR6_LT, __a, __b); +} +static int __ATTRS_o_ai +vec_all_gt(vector signed long long __a, vector bool long long __b) +{ + return __builtin_altivec_vcmpgtsd_p(__CR6_LT, __a, + (vector signed long long)__b); +} + +static int __ATTRS_o_ai +vec_all_gt(vector unsigned long long __a, vector unsigned long long __b) +{ + return __builtin_altivec_vcmpgtud_p(__CR6_LT, __a, __b); +} + +static int __ATTRS_o_ai +vec_all_gt(vector unsigned long long __a, vector bool long long __b) +{ + return __builtin_altivec_vcmpgtud_p(__CR6_LT, __a, + (vector unsigned long long)__b); +} + +static int __ATTRS_o_ai +vec_all_gt(vector bool long long __a, vector signed long long __b) +{ + return __builtin_altivec_vcmpgtud_p(__CR6_LT, + (vector unsigned long long)__a, + (vector unsigned long long)__b); +} + +static int __ATTRS_o_ai +vec_all_gt(vector bool long long __a, vector unsigned long long __b) +{ + return __builtin_altivec_vcmpgtud_p(__CR6_LT, (vector unsigned long long)__a, + __b); +} + +static int __ATTRS_o_ai +vec_all_gt(vector bool long long __a, vector bool long long __b) +{ + return __builtin_altivec_vcmpgtud_p(__CR6_LT, + (vector unsigned long long)__a, + (vector unsigned long long)__b); +} +#endif + static int __ATTRS_o_ai vec_all_gt(vector float __a, vector float __b) { @@ -11333,6 +12055,57 @@ vec_all_le(vector bool int __a, vector bool int __b) (vector unsigned int)__b); } +#ifdef __POWER8_VECTOR__ +static int __ATTRS_o_ai +vec_all_le(vector signed long long __a, vector signed long long __b) +{ + return __builtin_altivec_vcmpgtsd_p(__CR6_EQ, __a, __b); +} + +static int __ATTRS_o_ai +vec_all_le(vector unsigned long long __a, vector unsigned long long __b) +{ + return __builtin_altivec_vcmpgtud_p(__CR6_EQ, __a, __b); +} + +static int __ATTRS_o_ai +vec_all_le(vector signed long long __a, vector bool long long __b) +{ + return __builtin_altivec_vcmpgtsd_p(__CR6_EQ, __a, + (vector signed 
long long)__b); +} + +static int __ATTRS_o_ai +vec_all_le(vector unsigned long long __a, vector bool long long __b) +{ + return __builtin_altivec_vcmpgtud_p(__CR6_EQ, __a, + (vector unsigned long long)__b); +} + +static int __ATTRS_o_ai +vec_all_le(vector bool long long __a, vector signed long long __b) +{ + return __builtin_altivec_vcmpgtud_p(__CR6_EQ, + (vector unsigned long long)__a, + (vector unsigned long long)__b); +} + +static int __ATTRS_o_ai +vec_all_le(vector bool long long __a, vector unsigned long long __b) +{ + return __builtin_altivec_vcmpgtud_p(__CR6_EQ, (vector unsigned long long)__a, + __b); +} + +static int __ATTRS_o_ai +vec_all_le(vector bool long long __a, vector bool long long __b) +{ + return __builtin_altivec_vcmpgtud_p(__CR6_EQ, + (vector unsigned long long)__a, + (vector unsigned long long)__b); +} +#endif + static int __ATTRS_o_ai vec_all_le(vector float __a, vector float __b) { @@ -11479,6 +12252,57 @@ vec_all_lt(vector bool int __a, vector bool int __b) (vector unsigned int)__a); } +#ifdef __POWER8_VECTOR__ +static int __ATTRS_o_ai +vec_all_lt(vector signed long long __a, vector signed long long __b) +{ + return __builtin_altivec_vcmpgtsd_p(__CR6_LT, __b, __a); +} + +static int __ATTRS_o_ai +vec_all_lt(vector unsigned long long __a, vector unsigned long long __b) +{ + return __builtin_altivec_vcmpgtud_p(__CR6_LT, __b, __a); +} + +static int __ATTRS_o_ai +vec_all_lt(vector signed long long __a, vector bool long long __b) +{ + return __builtin_altivec_vcmpgtsd_p(__CR6_LT, (vector signed long long)__b, + __a); +} + +static int __ATTRS_o_ai +vec_all_lt(vector unsigned long long __a, vector bool long long __b) +{ + return __builtin_altivec_vcmpgtud_p(__CR6_LT, (vector unsigned long long)__b, + __a); +} + +static int __ATTRS_o_ai +vec_all_lt(vector bool long long __a, vector signed long long __b) +{ + return __builtin_altivec_vcmpgtud_p(__CR6_LT, + (vector unsigned long long)__b, + (vector unsigned long long)__a); +} + +static int __ATTRS_o_ai +vec_all_lt(vector bool long long __a, vector unsigned long long __b) +{ + return __builtin_altivec_vcmpgtud_p(__CR6_LT, __b, + (vector unsigned long long)__a); +} + +static int __ATTRS_o_ai +vec_all_lt(vector bool long long __a, vector bool long long __b) +{ + return __builtin_altivec_vcmpgtud_p(__CR6_LT, + (vector unsigned long long)__b, + (vector unsigned long long)__a); +} +#endif + static int __ATTRS_o_ai vec_all_lt(vector float __a, vector float __b) { @@ -11633,6 +12457,56 @@ vec_all_ne(vector bool int __a, vector bool int __b) return __builtin_altivec_vcmpequw_p(__CR6_EQ, (vector int)__a, (vector int)__b); } +#ifdef __POWER8_VECTOR__ +static int __ATTRS_o_ai +vec_all_ne(vector signed long long __a, vector signed long long __b) +{ + return __builtin_altivec_vcmpequd_p(__CR6_EQ, __a, __b); +} + +static int __ATTRS_o_ai +vec_all_ne(vector unsigned long long __a, vector unsigned long long __b) +{ + return __builtin_altivec_vcmpequd_p(__CR6_EQ, (vector long long)__a, + (vector long long)__b); +} + +static int __ATTRS_o_ai +vec_all_ne(vector signed long long __a, vector bool long long __b) +{ + return __builtin_altivec_vcmpequd_p(__CR6_EQ, __a, + (vector signed long long)__b); +} + +static int __ATTRS_o_ai +vec_all_ne(vector unsigned long long __a, vector bool long long __b) +{ + return __builtin_altivec_vcmpequd_p(__CR6_EQ, (vector signed long long)__a, + (vector signed long long)__b); +} + +static int __ATTRS_o_ai +vec_all_ne(vector bool long long __a, vector signed long long __b) +{ + return 
__builtin_altivec_vcmpequd_p(__CR6_EQ, (vector signed long long)__a, + (vector signed long long)__b); +} + +static int __ATTRS_o_ai +vec_all_ne(vector bool long long __a, vector unsigned long long __b) +{ + return __builtin_altivec_vcmpequd_p(__CR6_EQ, (vector signed long long)__a, + (vector signed long long)__b); +} + +static int __ATTRS_o_ai +vec_all_ne(vector bool long long __a, vector bool long long __b) +{ + return __builtin_altivec_vcmpequd_p(__CR6_EQ, (vector signed long long)__a, + (vector signed long long)__b); +} +#endif + static int __ATTRS_o_ai vec_all_ne(vector float __a, vector float __b) { @@ -11837,6 +12711,61 @@ vec_any_eq(vector bool int __a, vector bool int __b) __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)__a, (vector int)__b); } +#ifdef __POWER8_VECTOR__ +static int __ATTRS_o_ai +vec_any_eq(vector signed long long __a, vector signed long long __b) +{ + return __builtin_altivec_vcmpequd_p(__CR6_EQ_REV, __a, __b); +} + +static int __ATTRS_o_ai +vec_any_eq(vector unsigned long long __a, vector unsigned long long __b) +{ + return + __builtin_altivec_vcmpequd_p(__CR6_EQ_REV, (vector long long)__a, + (vector long long)__b); +} + +static int __ATTRS_o_ai +vec_any_eq(vector signed long long __a, vector bool long long __b) +{ + return __builtin_altivec_vcmpequd_p(__CR6_EQ_REV, __a, + (vector signed long long)__b); +} + +static int __ATTRS_o_ai +vec_any_eq(vector unsigned long long __a, vector bool long long __b) +{ + return + __builtin_altivec_vcmpequd_p(__CR6_EQ_REV, (vector signed long long)__a, + (vector signed long long)__b); +} + +static int __ATTRS_o_ai +vec_any_eq(vector bool long long __a, vector signed long long __b) +{ + return + __builtin_altivec_vcmpequd_p(__CR6_EQ_REV, (vector signed long long)__a, + (vector signed long long)__b); +} + +static int __ATTRS_o_ai +vec_any_eq(vector bool long long __a, vector unsigned long long __b) +{ + return + __builtin_altivec_vcmpequd_p(__CR6_EQ_REV, (vector signed long long)__a, + (vector signed long long)__b); +} + +static int __ATTRS_o_ai +vec_any_eq(vector bool long long __a, vector bool long long __b) +{ + return + __builtin_altivec_vcmpequd_p(__CR6_EQ_REV, (vector signed long long)__a, + (vector signed long long)__b); +} +#endif + static int __ATTRS_o_ai vec_any_eq(vector float __a, vector float __b) { @@ -11985,6 +12914,57 @@ vec_any_ge(vector bool int __a, vector bool int __b) (vector unsigned int)__a); } +#ifdef __POWER8_VECTOR__ +static int __ATTRS_o_ai +vec_any_ge(vector signed long long __a, vector signed long long __b) +{ + return __builtin_altivec_vcmpgtsd_p(__CR6_LT_REV, __b, __a); +} + +static int __ATTRS_o_ai +vec_any_ge(vector unsigned long long __a, vector unsigned long long __b) +{ + return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV, __b, __a); +} + +static int __ATTRS_o_ai +vec_any_ge(vector signed long long __a, vector bool long long __b) +{ + return __builtin_altivec_vcmpgtsd_p(__CR6_LT_REV, + (vector signed long long)__b, __a); +} + +static int __ATTRS_o_ai +vec_any_ge(vector unsigned long long __a, vector bool long long __b) +{ + return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV, + (vector unsigned long long)__b, __a); +} + +static int __ATTRS_o_ai +vec_any_ge(vector bool long long __a, vector signed long long __b) +{ + return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV, + (vector unsigned long long)__b, + (vector unsigned long long)__a); +} + +static int __ATTRS_o_ai +vec_any_ge(vector bool long long __a, vector unsigned long long __b) +{ + return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV, __b, + 
(vector unsigned long long)__a); +} + +static int __ATTRS_o_ai +vec_any_ge(vector bool long long __a, vector bool long long __b) +{ + return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV, + (vector unsigned long long)__b, + (vector unsigned long long)__a); +} +#endif + static int __ATTRS_o_ai vec_any_ge(vector float __a, vector float __b) { @@ -12135,6 +13115,58 @@ vec_any_gt(vector bool int __a, vector bool int __b) (vector unsigned int)__b); } +#ifdef __POWER8_VECTOR__ +static int __ATTRS_o_ai +vec_any_gt(vector signed long long __a, vector signed long long __b) +{ + return __builtin_altivec_vcmpgtsd_p(__CR6_EQ_REV, __a, __b); +} + +static int __ATTRS_o_ai +vec_any_gt(vector unsigned long long __a, vector unsigned long long __b) +{ + return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV, __a, __b); +} + +static int __ATTRS_o_ai +vec_any_gt(vector signed long long __a, vector bool long long __b) +{ + return __builtin_altivec_vcmpgtsd_p(__CR6_EQ_REV, __a, + (vector signed long long)__b); +} + + +static int __ATTRS_o_ai +vec_any_gt(vector unsigned long long __a, vector bool long long __b) +{ + return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV, __a, + (vector unsigned long long)__b); +} + +static int __ATTRS_o_ai +vec_any_gt(vector bool long long __a, vector signed long long __b) +{ + return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV, + (vector unsigned long long)__a, + (vector unsigned long long)__b); +} + +static int __ATTRS_o_ai +vec_any_gt(vector bool long long __a, vector unsigned long long __b) +{ + return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV, + (vector unsigned long long)__a, __b); +} + +static int __ATTRS_o_ai +vec_any_gt(vector bool long long __a, vector bool long long __b) +{ + return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV, + (vector unsigned long long)__a, + (vector unsigned long long)__b); +} +#endif + static int __ATTRS_o_ai vec_any_gt(vector float __a, vector float __b) { @@ -12285,6 +13317,57 @@ vec_any_le(vector bool int __a, vector bool int __b) (vector unsigned int)__b); } +#ifdef __POWER8_VECTOR__ +static int __ATTRS_o_ai +vec_any_le(vector signed long long __a, vector signed long long __b) +{ + return __builtin_altivec_vcmpgtsd_p(__CR6_LT_REV, __a, __b); +} + +static int __ATTRS_o_ai +vec_any_le(vector unsigned long long __a, vector unsigned long long __b) +{ + return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV, __a, __b); +} + +static int __ATTRS_o_ai +vec_any_le(vector signed long long __a, vector bool long long __b) +{ + return __builtin_altivec_vcmpgtsd_p(__CR6_LT_REV, __a, + (vector signed long long)__b); +} + +static int __ATTRS_o_ai +vec_any_le(vector unsigned long long __a, vector bool long long __b) +{ + return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV, __a, + (vector unsigned long long)__b); +} + +static int __ATTRS_o_ai +vec_any_le(vector bool long long __a, vector signed long long __b) +{ + return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV, + (vector unsigned long long)__a, + (vector unsigned long long)__b); +} + +static int __ATTRS_o_ai +vec_any_le(vector bool long long __a, vector unsigned long long __b) +{ + return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV, + (vector unsigned long long)__a, __b); +} + +static int __ATTRS_o_ai +vec_any_le(vector bool long long __a, vector bool long long __b) +{ + return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV, + (vector unsigned long long)__a, + (vector unsigned long long)__b); +} +#endif + static int __ATTRS_o_ai vec_any_le(vector float __a, vector float __b) { @@ -12435,6 +13518,57 @@ vec_any_lt(vector bool int __a, vector 
bool int __b) (vector unsigned int)__a); } +#ifdef __POWER8_VECTOR__ +static int __ATTRS_o_ai +vec_any_lt(vector signed long long __a, vector signed long long __b) +{ + return __builtin_altivec_vcmpgtsd_p(__CR6_EQ_REV, __b, __a); +} + +static int __ATTRS_o_ai +vec_any_lt(vector unsigned long long __a, vector unsigned long long __b) +{ + return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV, __b, __a); +} + +static int __ATTRS_o_ai +vec_any_lt(vector signed long long __a, vector bool long long __b) +{ + return __builtin_altivec_vcmpgtsd_p(__CR6_EQ_REV, + (vector signed long long)__b, __a); +} + +static int __ATTRS_o_ai +vec_any_lt(vector unsigned long long __a, vector bool long long __b) +{ + return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV, + (vector unsigned long long)__b, __a); +} + +static int __ATTRS_o_ai +vec_any_lt(vector bool long long __a, vector signed long long __b) +{ + return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV, + (vector unsigned long long)__b, + (vector unsigned long long)__a); +} + +static int __ATTRS_o_ai +vec_any_lt(vector bool long long __a, vector unsigned long long __b) +{ + return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV, __b, + (vector unsigned long long)__a); +} + +static int __ATTRS_o_ai +vec_any_lt(vector bool long long __a, vector bool long long __b) +{ + return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV, + (vector unsigned long long)__b, + (vector unsigned long long)__a); +} +#endif + static int __ATTRS_o_ai vec_any_lt(vector float __a, vector float __b) { @@ -12607,6 +13741,61 @@ vec_any_ne(vector bool int __a, vector bool int __b) __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)__a, (vector int)__b); } +#ifdef __POWER8_VECTOR__ +static int __ATTRS_o_ai +vec_any_ne(vector signed long long __a, vector signed long long __b) +{ + return __builtin_altivec_vcmpequd_p(__CR6_LT_REV, __a, __b); +} + +static int __ATTRS_o_ai +vec_any_ne(vector unsigned long long __a, vector unsigned long long __b) +{ + return + __builtin_altivec_vcmpequd_p(__CR6_LT_REV, (vector long long)__a, + (vector long long)__b); +} + +static int __ATTRS_o_ai +vec_any_ne(vector signed long long __a, vector bool long long __b) +{ + return __builtin_altivec_vcmpequd_p(__CR6_LT_REV, __a, + (vector signed long long)__b); +} + +static int __ATTRS_o_ai +vec_any_ne(vector unsigned long long __a, vector bool long long __b) +{ + return + __builtin_altivec_vcmpequd_p(__CR6_LT_REV, (vector signed long long)__a, + (vector signed long long)__b); +} + +static int __ATTRS_o_ai +vec_any_ne(vector bool long long __a, vector signed long long __b) +{ + return + __builtin_altivec_vcmpequd_p(__CR6_LT_REV, (vector signed long long)__a, + (vector signed long long)__b); +} + +static int __ATTRS_o_ai +vec_any_ne(vector bool long long __a, vector unsigned long long __b) +{ + return + __builtin_altivec_vcmpequd_p(__CR6_LT_REV, (vector signed long long)__a, + (vector signed long long)__b); +} + +static int __ATTRS_o_ai +vec_any_ne(vector bool long long __a, vector bool long long __b) +{ + return + __builtin_altivec_vcmpequd_p(__CR6_LT_REV, (vector signed long long)__a, + (vector signed long long)__b); +} +#endif + static int __ATTRS_o_ai vec_any_ne(vector float __a, vector float __b) { @@ -12661,6 +13850,133 @@ vec_any_out(vector float __a, vector float __b) return __builtin_altivec_vcmpbfp_p(__CR6_EQ_REV, __a, __b); } +/* Power 8 Crypto functions +Note: We diverge from the current GCC implementation with regard +to cryptography and related functions as follows: +- Only the SHA and AES instructions and builtins are 
disabled by -mno-crypto +- The remaining ones are only available on Power8 and up so + require -mpower8-vector +The justification for this is that export requirements require that +Category:Vector.Crypto is optional (i.e. compliant hardware may not provide +support). As a result, we need to be able to turn off support for those. +The remaining ones (currently controlled by -mcrypto for GCC) still +need to be provided on compliant hardware even if Vector.Crypto is not +provided. +FIXME: the naming convention for the builtins will be adjusted due +to the inconsistency (__builtin_crypto_ prefix on builtins that cannot be +removed with -mno-crypto). This is under development. +*/ +#ifdef __CRYPTO__ +static vector unsigned long long __attribute__((__always_inline__)) +__builtin_crypto_vsbox (vector unsigned long long __a) +{ + return __builtin_altivec_crypto_vsbox(__a); +} + +static vector unsigned long long __attribute__((__always_inline__)) +__builtin_crypto_vcipher (vector unsigned long long __a, + vector unsigned long long __b) +{ + return __builtin_altivec_crypto_vcipher(__a, __b); +} + +static vector unsigned long long __attribute__((__always_inline__)) +__builtin_crypto_vcipherlast (vector unsigned long long __a, + vector unsigned long long __b) +{ + return __builtin_altivec_crypto_vcipherlast(__a, __b); +} + +static vector unsigned long long __attribute__((__always_inline__)) +__builtin_crypto_vncipher (vector unsigned long long __a, + vector unsigned long long __b) +{ + return __builtin_altivec_crypto_vncipher(__a, __b); +} + +static vector unsigned long long __attribute__((__always_inline__)) +__builtin_crypto_vncipherlast (vector unsigned long long __a, + vector unsigned long long __b) +{ + return __builtin_altivec_crypto_vncipherlast(__a, __b); +} + + +#define __builtin_crypto_vshasigmad __builtin_altivec_crypto_vshasigmad +#define __builtin_crypto_vshasigmaw __builtin_altivec_crypto_vshasigmaw +#endif + +#ifdef __POWER8_VECTOR__ +static vector unsigned char __ATTRS_o_ai +__builtin_crypto_vpermxor (vector unsigned char __a, + vector unsigned char __b, + vector unsigned char __c) +{ + return __builtin_altivec_crypto_vpermxor(__a, __b, __c); +} + +static vector unsigned short __ATTRS_o_ai +__builtin_crypto_vpermxor (vector unsigned short __a, + vector unsigned short __b, + vector unsigned short __c) +{ + return (vector unsigned short) + __builtin_altivec_crypto_vpermxor((vector unsigned char) __a, + (vector unsigned char) __b, + (vector unsigned char) __c); +} + +static vector unsigned int __ATTRS_o_ai +__builtin_crypto_vpermxor (vector unsigned int __a, + vector unsigned int __b, + vector unsigned int __c) +{ + return (vector unsigned int) + __builtin_altivec_crypto_vpermxor((vector unsigned char) __a, + (vector unsigned char) __b, + (vector unsigned char) __c); +} + +static vector unsigned long long __ATTRS_o_ai +__builtin_crypto_vpermxor (vector unsigned long long __a, + vector unsigned long long __b, + vector unsigned long long __c) +{ + return (vector unsigned long long) + __builtin_altivec_crypto_vpermxor((vector unsigned char) __a, + (vector unsigned char) __b, + (vector unsigned char) __c); +} + +static vector unsigned char __ATTRS_o_ai +__builtin_crypto_vpmsumb (vector unsigned char __a, + vector unsigned char __b) +{ + return __builtin_altivec_crypto_vpmsumb(__a, __b); +} + +static vector unsigned short __ATTRS_o_ai +__builtin_crypto_vpmsumb (vector unsigned short __a, + vector unsigned short __b) +{ + return __builtin_altivec_crypto_vpmsumh(__a, __b); +} + +static vector 
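/* [Editor's note, an illustrative sketch rather than part of the patch:
 * under the split described in the comment above, portable code guards the
 * optional Category:Vector.Crypto pieces (AES, SHA) with __CRYPTO__ and the
 * unconditionally available Power8 pieces with __POWER8_VECTOR__:
 *
 *   vector unsigned long long st = {0, 0}, key = {0, 0};
 *   #ifdef __CRYPTO__
 *   st = __builtin_crypto_vcipher(st, key);      // one AES round
 *   #endif
 *   #ifdef __POWER8_VECTOR__
 *   vector unsigned char p = {1}, q = {2};
 *   p = __builtin_crypto_vpmsumb(p, q);          // carry-less multiply-sum
 *   #endif
 * ]
 */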
unsigned int __ATTRS_o_ai +__builtin_crypto_vpmsumb (vector unsigned int __a, + vector unsigned int __b) +{ + return __builtin_altivec_crypto_vpmsumw(__a, __b); +} + +static vector unsigned long long __ATTRS_o_ai +__builtin_crypto_vpmsumb (vector unsigned long long __a, + vector unsigned long long __b) +{ + return __builtin_altivec_crypto_vpmsumd(__a, __b); +} +#endif + #undef __ATTRS_o_ai #endif /* __ALTIVEC_H */ diff --git a/contrib/llvm/tools/clang/lib/Headers/arm_acle.h b/contrib/llvm/tools/clang/lib/Headers/arm_acle.h index 814df2c..6c56f3b 100644 --- a/contrib/llvm/tools/clang/lib/Headers/arm_acle.h +++ b/contrib/llvm/tools/clang/lib/Headers/arm_acle.h @@ -45,23 +45,23 @@ extern "C" { /* 8.4 Hints */ #if !defined(_MSC_VER) -static __inline__ void __attribute__((always_inline, nodebug)) __wfi(void) { +static __inline__ void __attribute__((__always_inline__, __nodebug__)) __wfi(void) { __builtin_arm_wfi(); } -static __inline__ void __attribute__((always_inline, nodebug)) __wfe(void) { +static __inline__ void __attribute__((__always_inline__, __nodebug__)) __wfe(void) { __builtin_arm_wfe(); } -static __inline__ void __attribute__((always_inline, nodebug)) __sev(void) { +static __inline__ void __attribute__((__always_inline__, __nodebug__)) __sev(void) { __builtin_arm_sev(); } -static __inline__ void __attribute__((always_inline, nodebug)) __sevl(void) { +static __inline__ void __attribute__((__always_inline__, __nodebug__)) __sevl(void) { __builtin_arm_sevl(); } -static __inline__ void __attribute__((always_inline, nodebug)) __yield(void) { +static __inline__ void __attribute__((__always_inline__, __nodebug__)) __yield(void) { __builtin_arm_yield(); } #endif @@ -71,7 +71,7 @@ static __inline__ void __attribute__((always_inline, nodebug)) __yield(void) { #endif /* 8.5 Swap */ -static __inline__ uint32_t __attribute__((always_inline, nodebug)) +static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__)) __swp(uint32_t x, volatile uint32_t *p) { uint32_t v; do v = __builtin_arm_ldrex(p); while (__builtin_arm_strex(x, p)); @@ -102,28 +102,28 @@ static __inline__ uint32_t __attribute__((always_inline, nodebug)) #endif /* 8.7 NOP */ -static __inline__ void __attribute__((always_inline, nodebug)) __nop(void) { +static __inline__ void __attribute__((__always_inline__, __nodebug__)) __nop(void) { __builtin_arm_nop(); } /* 9 DATA-PROCESSING INTRINSICS */ /* 9.2 Miscellaneous data-processing intrinsics */ /* ROR */ -static __inline__ uint32_t __attribute__((always_inline, nodebug)) +static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__)) __ror(uint32_t x, uint32_t y) { y %= 32; if (y == 0) return x; return (x >> y) | (x << (32 - y)); } -static __inline__ uint64_t __attribute__((always_inline, nodebug)) +static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__)) __rorll(uint64_t x, uint32_t y) { y %= 64; if (y == 0) return x; return (x >> y) | (x << (64 - y)); } -static __inline__ unsigned long __attribute__((always_inline, nodebug)) +static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__)) __rorl(unsigned long x, uint32_t y) { #if __SIZEOF_LONG__ == 4 return __ror(x, y); @@ -134,28 +134,28 @@ static __inline__ unsigned long __attribute__((always_inline, nodebug)) /* CLZ */ -static __inline__ uint32_t __attribute__((always_inline, nodebug)) +static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__)) __clz(uint32_t t) { return __builtin_clz(t); } -static __inline__ unsigned long __attribute__((always_inline, 
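/* [Editor's note: the point of the always_inline -> __always_inline__ and
 * nodebug -> __nodebug__ renames throughout arm_acle.h is macro hygiene.
 * The unreserved spellings are legal user macro names, so
 *
 *   #define always_inline    // legal in user code
 *   #include <arm_acle.h>    // the old header would then fail to parse
 *
 * broke the old header, while double-underscore identifiers are reserved
 * for the implementation and cannot collide. ]
 */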
nodebug)) +static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__)) __clzl(unsigned long t) { return __builtin_clzl(t); } -static __inline__ uint64_t __attribute__((always_inline, nodebug)) +static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__)) __clzll(uint64_t t) { return __builtin_clzll(t); } /* REV */ -static __inline__ uint32_t __attribute__((always_inline, nodebug)) +static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__)) __rev(uint32_t t) { return __builtin_bswap32(t); } -static __inline__ unsigned long __attribute__((always_inline, nodebug)) +static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__)) __revl(unsigned long t) { #if __SIZEOF_LONG__ == 4 return __builtin_bswap32(t); @@ -164,40 +164,40 @@ static __inline__ unsigned long __attribute__((always_inline, nodebug)) #endif } -static __inline__ uint64_t __attribute__((always_inline, nodebug)) +static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__)) __revll(uint64_t t) { return __builtin_bswap64(t); } /* REV16 */ -static __inline__ uint32_t __attribute__((always_inline, nodebug)) +static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__)) __rev16(uint32_t t) { return __ror(__rev(t), 16); } -static __inline__ unsigned long __attribute__((always_inline, nodebug)) +static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__)) __rev16l(unsigned long t) { return __rorl(__revl(t), sizeof(long) / 2); } -static __inline__ uint64_t __attribute__((always_inline, nodebug)) +static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__)) __rev16ll(uint64_t t) { return __rorll(__revll(t), 32); } /* REVSH */ -static __inline__ int16_t __attribute__((always_inline, nodebug)) +static __inline__ int16_t __attribute__((__always_inline__, __nodebug__)) __revsh(int16_t t) { return __builtin_bswap16(t); } /* RBIT */ -static __inline__ uint32_t __attribute__((always_inline, nodebug)) +static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__)) __rbit(uint32_t t) { return __builtin_arm_rbit(t); } -static __inline__ uint64_t __attribute__((always_inline, nodebug)) +static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__)) __rbitll(uint64_t t) { #if __ARM_32BIT_STATE return (((uint64_t) __builtin_arm_rbit(t)) << 32) | @@ -207,7 +207,7 @@ static __inline__ uint64_t __attribute__((always_inline, nodebug)) #endif } -static __inline__ unsigned long __attribute__((always_inline, nodebug)) +static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__)) __rbitl(unsigned long t) { #if __SIZEOF_LONG__ == 4 return __rbit(t); @@ -230,17 +230,17 @@ static __inline__ unsigned long __attribute__((always_inline, nodebug)) /* 9.4.2 Saturating addition and subtraction intrinsics */ #if __ARM_32BIT_STATE -static __inline__ int32_t __attribute__((always_inline, nodebug)) +static __inline__ int32_t __attribute__((__always_inline__, __nodebug__)) __qadd(int32_t t, int32_t v) { return __builtin_arm_qadd(t, v); } -static __inline__ int32_t __attribute__((always_inline, nodebug)) +static __inline__ int32_t __attribute__((__always_inline__, __nodebug__)) __qsub(int32_t t, int32_t v) { return __builtin_arm_qsub(t, v); } -static __inline__ int32_t __attribute__((always_inline, nodebug)) +static __inline__ int32_t __attribute__((__always_inline__, __nodebug__)) __qdbl(int32_t t) { return __builtin_arm_qadd(t, t); } @@ -248,42 +248,42 @@ __qdbl(int32_t t) { /* 9.7 CRC32 intrinsics */ #if 
__ARM_FEATURE_CRC32 -static __inline__ uint32_t __attribute__((always_inline, nodebug)) +static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__)) __crc32b(uint32_t a, uint8_t b) { return __builtin_arm_crc32b(a, b); } -static __inline__ uint32_t __attribute__((always_inline, nodebug)) +static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__)) __crc32h(uint32_t a, uint16_t b) { return __builtin_arm_crc32h(a, b); } -static __inline__ uint32_t __attribute__((always_inline, nodebug)) +static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__)) __crc32w(uint32_t a, uint32_t b) { return __builtin_arm_crc32w(a, b); } -static __inline__ uint32_t __attribute__((always_inline, nodebug)) +static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__)) __crc32d(uint32_t a, uint64_t b) { return __builtin_arm_crc32d(a, b); } -static __inline__ uint32_t __attribute__((always_inline, nodebug)) +static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__)) __crc32cb(uint32_t a, uint8_t b) { return __builtin_arm_crc32cb(a, b); } -static __inline__ uint32_t __attribute__((always_inline, nodebug)) +static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__)) __crc32ch(uint32_t a, uint16_t b) { return __builtin_arm_crc32ch(a, b); } -static __inline__ uint32_t __attribute__((always_inline, nodebug)) +static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__)) __crc32cw(uint32_t a, uint32_t b) { return __builtin_arm_crc32cw(a, b); } -static __inline__ uint32_t __attribute__((always_inline, nodebug)) +static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__)) __crc32cd(uint32_t a, uint64_t b) { return __builtin_arm_crc32cd(a, b); } diff --git a/contrib/llvm/tools/clang/lib/Headers/avx2intrin.h b/contrib/llvm/tools/clang/lib/Headers/avx2intrin.h index 394fdfe..e1e639d 100644 --- a/contrib/llvm/tools/clang/lib/Headers/avx2intrin.h +++ b/contrib/llvm/tools/clang/lib/Headers/avx2intrin.h @@ -160,7 +160,7 @@ _mm256_blendv_epi8(__m256i __V1, __m256i __V2, __m256i __M) #define _mm256_blend_epi16(V1, V2, M) __extension__ ({ \ __m256i __V1 = (V1); \ __m256i __V2 = (V2); \ - (__m256d)__builtin_shufflevector((__v16hi)__V1, (__v16hi)__V2, \ + (__m256i)__builtin_shufflevector((__v16hi)__V1, (__v16hi)__V2, \ (((M) & 0x01) ? 16 : 0), \ (((M) & 0x02) ? 17 : 1), \ (((M) & 0x04) ? 
18 : 2), \ @@ -542,6 +542,8 @@ _mm256_sign_epi32(__m256i __a, __m256i __b) __m256i __a = (a); \ (__m256i)__builtin_ia32_pslldqi256(__a, (count)*8); }) +#define _mm256_bslli_epi128(a, count) _mm256_slli_si256((a), (count)) + static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) _mm256_slli_epi16(__m256i __a, int __count) { @@ -606,6 +608,8 @@ _mm256_sra_epi32(__m256i __a, __m128i __count) __m256i __a = (a); \ (__m256i)__builtin_ia32_psrldqi256(__a, (count)*8); }) +#define _mm256_bsrli_epi128(a, count) _mm256_srli_si256((a), (count)) + static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) _mm256_srli_epi16(__m256i __a, int __count) { @@ -756,6 +760,12 @@ _mm_broadcastss_ps(__m128 __X) return (__m128)__builtin_ia32_vbroadcastss_ps((__v4sf)__X); } +static __inline__ __m128d __attribute__((__always_inline__, __nodebug__)) +_mm_broadcastsd_pd(__m128d __a) +{ + return __builtin_shufflevector(__a, __a, 0, 0); +} + static __inline__ __m256 __attribute__((__always_inline__, __nodebug__)) _mm256_broadcastss_ps(__m128 __X) { @@ -771,7 +781,7 @@ _mm256_broadcastsd_pd(__m128d __X) static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) _mm256_broadcastsi128_si256(__m128i __X) { - return (__m256i)__builtin_ia32_vbroadcastsi256(__X); + return (__m256i)__builtin_shufflevector(__X, __X, 0, 1, 0, 1); } #define _mm_blend_epi32(V1, V2, M) __extension__ ({ \ @@ -874,14 +884,21 @@ _mm256_permutevar8x32_ps(__m256 __a, __m256 __b) __m256i __V2 = (V2); \ (__m256i)__builtin_ia32_permti256(__V1, __V2, (M)); }) -#define _mm256_extracti128_si256(A, O) __extension__ ({ \ - __m256i __A = (A); \ - (__m128i)__builtin_ia32_extract128i256(__A, (O)); }) - -#define _mm256_inserti128_si256(V1, V2, O) __extension__ ({ \ - __m256i __V1 = (V1); \ - __m128i __V2 = (V2); \ - (__m256i)__builtin_ia32_insert128i256(__V1, __V2, (O)); }) +#define _mm256_extracti128_si256(V, M) __extension__ ({ \ + (__m128i)__builtin_shufflevector( \ + (__v4di)(V), \ + (__v4di)(_mm256_setzero_si256()), \ + (((M) & 1) ? 2 : 0), \ + (((M) & 1) ? 3 : 1) );}) + +#define _mm256_inserti128_si256(V1, V2, M) __extension__ ({ \ + (__m256i)__builtin_shufflevector( \ + (__v4di)(V1), \ + (__v4di)_mm256_castsi128_si256((__m128i)(V2)), \ + (((M) & 1) ? 0 : 4), \ + (((M) & 1) ? 1 : 5), \ + (((M) & 1) ? 4 : 2), \ + (((M) & 1) ? 5 : 3) );}) static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) _mm256_maskload_epi32(int const *__X, __m256i __M) diff --git a/contrib/llvm/tools/clang/lib/Headers/avx512bwintrin.h b/contrib/llvm/tools/clang/lib/Headers/avx512bwintrin.h index bc4d4ac..d0591e4 100644 --- a/contrib/llvm/tools/clang/lib/Headers/avx512bwintrin.h +++ b/contrib/llvm/tools/clang/lib/Headers/avx512bwintrin.h @@ -21,15 +21,37 @@ * *===-----------------------------------------------------------------------=== */ +#ifndef __IMMINTRIN_H +#error "Never use <avx512bwintrin.h> directly; include <immintrin.h> instead." 
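/* [Editor's note on the avx2intrin.h hunks above, illustrative only:
 * replacing the opaque __builtin_ia32_{vbroadcastsi256, extract128i256,
 * insert128i256} builtins with __builtin_shufflevector lets the optimizer
 * reason about these macros like any other shuffle. In 64-bit lanes:
 *
 *   __m128i hi = _mm256_extracti128_si256(v, 1);   // selects lanes {2,3}
 *   __m256i r  = _mm256_inserti128_si256(v, x, 0); // x becomes lanes {0,1}
 *
 * The same file also gains the _mm256_bslli_epi128/_mm256_bsrli_epi128
 * aliases and fixes _mm256_blend_epi16, whose result had been cast to
 * __m256d instead of __m256i. ]
 */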
+#endif #ifndef __AVX512BWINTRIN_H #define __AVX512BWINTRIN_H typedef unsigned int __mmask32; typedef unsigned long long __mmask64; -typedef char __v64qi __attribute__ ((vector_size (64))); +typedef char __v64qi __attribute__ ((__vector_size__ (64))); typedef short __v32hi __attribute__ ((__vector_size__ (64))); +static __inline __v64qi __attribute__ ((__always_inline__, __nodebug__)) +_mm512_setzero_qi (void) { + return (__v64qi){ 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0 }; +} + +static __inline __v32hi __attribute__ ((__always_inline__, __nodebug__)) +_mm512_setzero_hi (void) { + return (__v32hi){ 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0 }; +} /* Integer compare */ @@ -45,6 +67,18 @@ _mm512_mask_cmpeq_epi8_mask(__mmask64 __u, __m512i __a, __m512i __b) { __u); } +static __inline__ __mmask64 __attribute__((__always_inline__, __nodebug__)) +_mm512_cmpeq_epu8_mask(__m512i __a, __m512i __b) { + return (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)__a, (__v64qi)__b, 0, + (__mmask64)-1); +} + +static __inline__ __mmask64 __attribute__((__always_inline__, __nodebug__)) +_mm512_mask_cmpeq_epu8_mask(__mmask64 __u, __m512i __a, __m512i __b) { + return (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)__a, (__v64qi)__b, 0, + __u); +} + static __inline__ __mmask32 __attribute__((__always_inline__, __nodebug__)) _mm512_cmpeq_epi16_mask(__m512i __a, __m512i __b) { return (__mmask32)__builtin_ia32_pcmpeqw512_mask((__v32hi)__a, (__v32hi)__b, @@ -57,4 +91,406 @@ _mm512_mask_cmpeq_epi16_mask(__mmask32 __u, __m512i __a, __m512i __b) { __u); } +static __inline__ __mmask32 __attribute__((__always_inline__, __nodebug__)) +_mm512_cmpeq_epu16_mask(__m512i __a, __m512i __b) { + return (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)__a, (__v32hi)__b, 0, + (__mmask32)-1); +} + +static __inline__ __mmask32 __attribute__((__always_inline__, __nodebug__)) +_mm512_mask_cmpeq_epu16_mask(__mmask32 __u, __m512i __a, __m512i __b) { + return (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)__a, (__v32hi)__b, 0, + __u); +} + +static __inline__ __mmask64 __attribute__((__always_inline__, __nodebug__)) +_mm512_cmpge_epi8_mask(__m512i __a, __m512i __b) { + return (__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)__a, (__v64qi)__b, 5, + (__mmask64)-1); +} + +static __inline__ __mmask64 __attribute__((__always_inline__, __nodebug__)) +_mm512_mask_cmpge_epi8_mask(__mmask64 __u, __m512i __a, __m512i __b) { + return (__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)__a, (__v64qi)__b, 5, + __u); +} + +static __inline__ __mmask64 __attribute__((__always_inline__, __nodebug__)) +_mm512_cmpge_epu8_mask(__m512i __a, __m512i __b) { + return (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)__a, (__v64qi)__b, 5, + (__mmask64)-1); +} + +static __inline__ __mmask64 __attribute__((__always_inline__, __nodebug__)) +_mm512_mask_cmpge_epu8_mask(__mmask64 __u, __m512i __a, __m512i __b) { + return (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)__a, (__v64qi)__b, 5, + __u); +} + +static __inline__ __mmask32 __attribute__((__always_inline__, __nodebug__)) +_mm512_cmpge_epi16_mask(__m512i __a, __m512i __b) { + return (__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)__a, (__v32hi)__b, 5, + (__mmask32)-1); +} + +static __inline__ __mmask32 __attribute__((__always_inline__, __nodebug__)) +_mm512_mask_cmpge_epi16_mask(__mmask32 __u, 
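/* [Editor's note: the trailing immediate handed to the (u)cmp{b,w}512_mask
 * builtins throughout this file is the AVX-512 comparison predicate in the
 * _MM_CMPINT_* encoding: 0 = EQ, 1 = LT, 2 = LE, 4 = NE, 5 = NLT (>=),
 * 6 = NLE (>). That is why cmpge passes 5, cmple passes 2, and the
 * unsigned cmpgt variants pass 6. For example:
 *
 *   __mmask64 m = _mm512_cmpge_epu8_mask(a, b); // ucmpb512_mask(a, b, 5, -1)
 * ]
 */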
__m512i __a, __m512i __b) { + return (__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)__a, (__v32hi)__b, 5, + __u); +} + +static __inline__ __mmask32 __attribute__((__always_inline__, __nodebug__)) +_mm512_cmpge_epu16_mask(__m512i __a, __m512i __b) { + return (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)__a, (__v32hi)__b, 5, + (__mmask32)-1); +} + +static __inline__ __mmask32 __attribute__((__always_inline__, __nodebug__)) +_mm512_mask_cmpge_epu16_mask(__mmask32 __u, __m512i __a, __m512i __b) { + return (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)__a, (__v32hi)__b, 5, + __u); +} + +static __inline__ __mmask64 __attribute__((__always_inline__, __nodebug__)) +_mm512_cmpgt_epi8_mask(__m512i __a, __m512i __b) { + return (__mmask64)__builtin_ia32_pcmpgtb512_mask((__v64qi)__a, (__v64qi)__b, + (__mmask64)-1); +} + +static __inline__ __mmask64 __attribute__((__always_inline__, __nodebug__)) +_mm512_mask_cmpgt_epi8_mask(__mmask64 __u, __m512i __a, __m512i __b) { + return (__mmask64)__builtin_ia32_pcmpgtb512_mask((__v64qi)__a, (__v64qi)__b, + __u); +} + +static __inline__ __mmask64 __attribute__((__always_inline__, __nodebug__)) +_mm512_cmpgt_epu8_mask(__m512i __a, __m512i __b) { + return (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)__a, (__v64qi)__b, 6, + (__mmask64)-1); +} + +static __inline__ __mmask64 __attribute__((__always_inline__, __nodebug__)) +_mm512_mask_cmpgt_epu8_mask(__mmask64 __u, __m512i __a, __m512i __b) { + return (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)__a, (__v64qi)__b, 6, + __u); +} + +static __inline__ __mmask32 __attribute__((__always_inline__, __nodebug__)) +_mm512_cmpgt_epi16_mask(__m512i __a, __m512i __b) { + return (__mmask32)__builtin_ia32_pcmpgtw512_mask((__v32hi)__a, (__v32hi)__b, + (__mmask32)-1); +} + +static __inline__ __mmask32 __attribute__((__always_inline__, __nodebug__)) +_mm512_mask_cmpgt_epi16_mask(__mmask32 __u, __m512i __a, __m512i __b) { + return (__mmask32)__builtin_ia32_pcmpgtw512_mask((__v32hi)__a, (__v32hi)__b, + __u); +} + +static __inline__ __mmask32 __attribute__((__always_inline__, __nodebug__)) +_mm512_cmpgt_epu16_mask(__m512i __a, __m512i __b) { + return (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)__a, (__v32hi)__b, 6, + (__mmask32)-1); +} + +static __inline__ __mmask32 __attribute__((__always_inline__, __nodebug__)) +_mm512_mask_cmpgt_epu16_mask(__mmask32 __u, __m512i __a, __m512i __b) { + return (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)__a, (__v32hi)__b, 6, + __u); +} + +static __inline__ __mmask64 __attribute__((__always_inline__, __nodebug__)) +_mm512_cmple_epi8_mask(__m512i __a, __m512i __b) { + return (__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)__a, (__v64qi)__b, 2, + (__mmask64)-1); +} + +static __inline__ __mmask64 __attribute__((__always_inline__, __nodebug__)) +_mm512_mask_cmple_epi8_mask(__mmask64 __u, __m512i __a, __m512i __b) { + return (__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)__a, (__v64qi)__b, 2, + __u); +} + +static __inline__ __mmask64 __attribute__((__always_inline__, __nodebug__)) +_mm512_cmple_epu8_mask(__m512i __a, __m512i __b) { + return (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)__a, (__v64qi)__b, 2, + (__mmask64)-1); +} + +static __inline__ __mmask64 __attribute__((__always_inline__, __nodebug__)) +_mm512_mask_cmple_epu8_mask(__mmask64 __u, __m512i __a, __m512i __b) { + return (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)__a, (__v64qi)__b, 2, + __u); +} + +static __inline__ __mmask32 __attribute__((__always_inline__, __nodebug__)) +_mm512_cmple_epi16_mask(__m512i __a, 
__m512i __b) { + return (__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)__a, (__v32hi)__b, 2, + (__mmask32)-1); +} + +static __inline__ __mmask32 __attribute__((__always_inline__, __nodebug__)) +_mm512_mask_cmple_epi16_mask(__mmask32 __u, __m512i __a, __m512i __b) { + return (__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)__a, (__v32hi)__b, 2, + __u); +} + +static __inline__ __mmask32 __attribute__((__always_inline__, __nodebug__)) +_mm512_cmple_epu16_mask(__m512i __a, __m512i __b) { + return (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)__a, (__v32hi)__b, 2, + (__mmask32)-1); +} + +static __inline__ __mmask32 __attribute__((__always_inline__, __nodebug__)) +_mm512_mask_cmple_epu16_mask(__mmask32 __u, __m512i __a, __m512i __b) { + return (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)__a, (__v32hi)__b, 2, + __u); +} + +static __inline__ __mmask64 __attribute__((__always_inline__, __nodebug__)) +_mm512_cmplt_epi8_mask(__m512i __a, __m512i __b) { + return (__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)__a, (__v64qi)__b, 1, + (__mmask64)-1); +} + +static __inline__ __mmask64 __attribute__((__always_inline__, __nodebug__)) +_mm512_mask_cmplt_epi8_mask(__mmask64 __u, __m512i __a, __m512i __b) { + return (__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)__a, (__v64qi)__b, 1, + __u); +} + +static __inline__ __mmask64 __attribute__((__always_inline__, __nodebug__)) +_mm512_cmplt_epu8_mask(__m512i __a, __m512i __b) { + return (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)__a, (__v64qi)__b, 1, + (__mmask64)-1); +} + +static __inline__ __mmask64 __attribute__((__always_inline__, __nodebug__)) +_mm512_mask_cmplt_epu8_mask(__mmask64 __u, __m512i __a, __m512i __b) { + return (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)__a, (__v64qi)__b, 1, + __u); +} + +static __inline__ __mmask32 __attribute__((__always_inline__, __nodebug__)) +_mm512_cmplt_epi16_mask(__m512i __a, __m512i __b) { + return (__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)__a, (__v32hi)__b, 1, + (__mmask32)-1); +} + +static __inline__ __mmask32 __attribute__((__always_inline__, __nodebug__)) +_mm512_mask_cmplt_epi16_mask(__mmask32 __u, __m512i __a, __m512i __b) { + return (__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)__a, (__v32hi)__b, 1, + __u); +} + +static __inline__ __mmask32 __attribute__((__always_inline__, __nodebug__)) +_mm512_cmplt_epu16_mask(__m512i __a, __m512i __b) { + return (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)__a, (__v32hi)__b, 1, + (__mmask32)-1); +} + +static __inline__ __mmask32 __attribute__((__always_inline__, __nodebug__)) +_mm512_mask_cmplt_epu16_mask(__mmask32 __u, __m512i __a, __m512i __b) { + return (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)__a, (__v32hi)__b, 1, + __u); +} + +static __inline__ __mmask64 __attribute__((__always_inline__, __nodebug__)) +_mm512_cmpneq_epi8_mask(__m512i __a, __m512i __b) { + return (__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)__a, (__v64qi)__b, 4, + (__mmask64)-1); +} + +static __inline__ __mmask64 __attribute__((__always_inline__, __nodebug__)) +_mm512_mask_cmpneq_epi8_mask(__mmask64 __u, __m512i __a, __m512i __b) { + return (__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)__a, (__v64qi)__b, 4, + __u); +} + +static __inline__ __mmask64 __attribute__((__always_inline__, __nodebug__)) +_mm512_cmpneq_epu8_mask(__m512i __a, __m512i __b) { + return (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)__a, (__v64qi)__b, 4, + (__mmask64)-1); +} + +static __inline__ __mmask64 __attribute__((__always_inline__, __nodebug__)) +_mm512_mask_cmpneq_epu8_mask(__mmask64 
__u, __m512i __a, __m512i __b) { + return (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)__a, (__v64qi)__b, 4, + __u); +} + +static __inline__ __mmask32 __attribute__((__always_inline__, __nodebug__)) +_mm512_cmpneq_epi16_mask(__m512i __a, __m512i __b) { + return (__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)__a, (__v32hi)__b, 4, + (__mmask32)-1); +} + +static __inline__ __mmask32 __attribute__((__always_inline__, __nodebug__)) +_mm512_mask_cmpneq_epi16_mask(__mmask32 __u, __m512i __a, __m512i __b) { + return (__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)__a, (__v32hi)__b, 4, + __u); +} + +static __inline__ __mmask32 __attribute__((__always_inline__, __nodebug__)) +_mm512_cmpneq_epu16_mask(__m512i __a, __m512i __b) { + return (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)__a, (__v32hi)__b, 4, + (__mmask32)-1); +} + +static __inline__ __mmask32 __attribute__((__always_inline__, __nodebug__)) +_mm512_mask_cmpneq_epu16_mask(__mmask32 __u, __m512i __a, __m512i __b) { + return (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)__a, (__v32hi)__b, 4, + __u); +} + +static __inline__ __m512i __attribute__((__always_inline__, __nodebug__)) +_mm512_add_epi8 (__m512i __A, __m512i __B) { + return (__m512i) ((__v64qi) __A + (__v64qi) __B); +} + +static __inline__ __m512i __attribute__((__always_inline__, __nodebug__)) +_mm512_mask_add_epi8 (__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) { + return (__m512i) __builtin_ia32_paddb512_mask ((__v64qi) __A, + (__v64qi) __B, + (__v64qi) __W, + (__mmask64) __U); +} + +static __inline__ __m512i __attribute__((__always_inline__, __nodebug__)) +_mm512_maskz_add_epi8 (__mmask64 __U, __m512i __A, __m512i __B) { + return (__m512i) __builtin_ia32_paddb512_mask ((__v64qi) __A, + (__v64qi) __B, + (__v64qi) + _mm512_setzero_qi (), + (__mmask64) __U); +} + +static __inline__ __m512i __attribute__((__always_inline__, __nodebug__)) +_mm512_sub_epi8 (__m512i __A, __m512i __B) { + return (__m512i) ((__v64qi) __A - (__v64qi) __B); +} + +static __inline__ __m512i __attribute__((__always_inline__, __nodebug__)) +_mm512_mask_sub_epi8 (__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) { + return (__m512i) __builtin_ia32_psubb512_mask ((__v64qi) __A, + (__v64qi) __B, + (__v64qi) __W, + (__mmask64) __U); +} + +static __inline__ __m512i __attribute__((__always_inline__, __nodebug__)) +_mm512_maskz_sub_epi8 (__mmask64 __U, __m512i __A, __m512i __B) { + return (__m512i) __builtin_ia32_psubb512_mask ((__v64qi) __A, + (__v64qi) __B, + (__v64qi) + _mm512_setzero_qi (), + (__mmask64) __U); +} + +static __inline__ __m512i __attribute__((__always_inline__, __nodebug__)) +_mm512_add_epi16 (__m512i __A, __m512i __B) { + return (__m512i) ((__v32hi) __A + (__v32hi) __B); +} + +static __inline__ __m512i __attribute__((__always_inline__, __nodebug__)) +_mm512_mask_add_epi16 (__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) { + return (__m512i) __builtin_ia32_paddw512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) __W, + (__mmask32) __U); +} + +static __inline__ __m512i __attribute__((__always_inline__, __nodebug__)) +_mm512_maskz_add_epi16 (__mmask32 __U, __m512i __A, __m512i __B) { + return (__m512i) __builtin_ia32_paddw512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) + _mm512_setzero_hi (), + (__mmask32) __U); +} + +static __inline__ __m512i __attribute__((__always_inline__, __nodebug__)) +_mm512_sub_epi16 (__m512i __A, __m512i __B) { + return (__m512i) ((__v32hi) __A - (__v32hi) __B); +} + +static __inline__ __m512i __attribute__((__always_inline__, __nodebug__)) 
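/* [Editor's note: the _mask_/_maskz_ arithmetic wrappers here all follow
 * one convention: the _mask_ (merge) form keeps lane i of __W wherever mask
 * bit i is clear, and the _maskz_ form substitutes zero instead. A scalar
 * sketch of _mm512_mask_add_epi8:
 *
 *   for (int i = 0; i < 64; ++i)
 *     r[i] = ((__U >> i) & 1) ? (char)(__A[i] + __B[i]) : __W[i];
 * ]
 */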
+_mm512_mask_sub_epi16 (__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) { + return (__m512i) __builtin_ia32_psubw512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) __W, + (__mmask32) __U); +} + +static __inline__ __m512i __attribute__((__always_inline__, __nodebug__)) +_mm512_maskz_sub_epi16 (__mmask32 __U, __m512i __A, __m512i __B) { + return (__m512i) __builtin_ia32_psubw512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) + _mm512_setzero_hi (), + (__mmask32) __U); +} + +static __inline__ __m512i __attribute__((__always_inline__, __nodebug__)) +_mm512_mullo_epi16 (__m512i __A, __m512i __B) { + return (__m512i) ((__v32hi) __A * (__v32hi) __B); +} + +static __inline__ __m512i __attribute__((__always_inline__, __nodebug__)) +_mm512_mask_mullo_epi16 (__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) { + return (__m512i) __builtin_ia32_pmullw512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) __W, + (__mmask32) __U); +} + +static __inline__ __m512i __attribute__((__always_inline__, __nodebug__)) +_mm512_maskz_mullo_epi16 (__mmask32 __U, __m512i __A, __m512i __B) { + return (__m512i) __builtin_ia32_pmullw512_mask ((__v32hi) __A, + (__v32hi) __B, + (__v32hi) + _mm512_setzero_hi (), + (__mmask32) __U); +} + +#define _mm512_cmp_epi8_mask(a, b, p) __extension__ ({ \ + (__mmask16)__builtin_ia32_cmpb512_mask((__v64qi)(__m512i)(a), \ + (__v64qi)(__m512i)(b), \ + (p), (__mmask64)-1); }) + +#define _mm512_mask_cmp_epi8_mask(m, a, b, p) __extension__ ({ \ + (__mmask16)__builtin_ia32_cmpb512_mask((__v64qi)(__m512i)(a), \ + (__v64qi)(__m512i)(b), \ + (p), (__mmask64)(m)); }) + +#define _mm512_cmp_epu8_mask(a, b, p) __extension__ ({ \ + (__mmask16)__builtin_ia32_ucmpb512_mask((__v64qi)(__m512i)(a), \ + (__v64qi)(__m512i)(b), \ + (p), (__mmask64)-1); }) + +#define _mm512_mask_cmp_epu8_mask(m, a, b, p) __extension__ ({ \ + (__mmask16)__builtin_ia32_ucmpb512_mask((__v64qi)(__m512i)(a), \ + (__v64qi)(__m512i)(b), \ + (p), (__mmask64)(m)); }) + +#define _mm512_cmp_epi16_mask(a, b, p) __extension__ ({ \ + (__mmask16)__builtin_ia32_cmpw512_mask((__v32hi)(__m512i)(a), \ + (__v32hi)(__m512i)(b), \ + (p), (__mmask32)-1); }) + +#define _mm512_mask_cmp_epi16_mask(m, a, b, p) __extension__ ({ \ + (__mmask16)__builtin_ia32_cmpw512_mask((__v32hi)(__m512i)(a), \ + (__v32hi)(__m512i)(b), \ + (p), (__mmask32)(m)); }) + +#define _mm512_cmp_epu16_mask(a, b, p) __extension__ ({ \ + (__mmask16)__builtin_ia32_ucmpw512_mask((__v32hi)(__m512i)(a), \ + (__v32hi)(__m512i)(b), \ + (p), (__mmask32)-1); }) + +#define _mm512_mask_cmp_epu16_mask(m, a, b, p) __extension__ ({ \ + (__mmask16)__builtin_ia32_ucmpw512_mask((__v32hi)(__m512i)(a), \ + (__v32hi)(__m512i)(b), \ + (p), (__mmask32)(m)); }) + #endif diff --git a/contrib/llvm/tools/clang/lib/Headers/avx512dqintrin.h b/contrib/llvm/tools/clang/lib/Headers/avx512dqintrin.h new file mode 100644 index 0000000..fd33be2 --- /dev/null +++ b/contrib/llvm/tools/clang/lib/Headers/avx512dqintrin.h @@ -0,0 +1,237 @@ +/*===---- avx512dqintrin.h - AVX512DQ intrinsics ---------------------------=== + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright 
notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __IMMINTRIN_H +#error "Never use <avx512dqintrin.h> directly; include <immintrin.h> instead." +#endif + +#ifndef __AVX512DQINTRIN_H +#define __AVX512DQINTRIN_H + +static __inline__ __m512i __attribute__ ((__always_inline__, __nodebug__)) +_mm512_mullo_epi64 (__m512i __A, __m512i __B) { + return (__m512i) ((__v8di) __A * (__v8di) __B); +} + +static __inline__ __m512i __attribute__ ((__always_inline__, __nodebug__)) +_mm512_mask_mullo_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) { + return (__m512i) __builtin_ia32_pmullq512_mask ((__v8di) __A, + (__v8di) __B, + (__v8di) __W, + (__mmask8) __U); +} + +static __inline__ __m512i __attribute__ ((__always_inline__, __nodebug__)) +_mm512_maskz_mullo_epi64 (__mmask8 __U, __m512i __A, __m512i __B) { + return (__m512i) __builtin_ia32_pmullq512_mask ((__v8di) __A, + (__v8di) __B, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __U); +} + +static __inline__ __m512d __attribute__ ((__always_inline__, __nodebug__)) +_mm512_xor_pd (__m512d __A, __m512d __B) { + return (__m512d) ((__v8di) __A ^ (__v8di) __B); +} + +static __inline__ __m512d __attribute__ ((__always_inline__, __nodebug__)) +_mm512_mask_xor_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) { + return (__m512d) __builtin_ia32_xorpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) __W, + (__mmask8) __U); +} + +static __inline__ __m512d __attribute__ ((__always_inline__, __nodebug__)) +_mm512_maskz_xor_pd (__mmask8 __U, __m512d __A, __m512d __B) { + return (__m512d) __builtin_ia32_xorpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m512 __attribute__ ((__always_inline__, __nodebug__)) +_mm512_xor_ps (__m512 __A, __m512 __B) { + return (__m512) ((__v16si) __A ^ (__v16si) __B); +} + +static __inline__ __m512 __attribute__ ((__always_inline__, __nodebug__)) +_mm512_mask_xor_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) { + return (__m512) __builtin_ia32_xorps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __W, + (__mmask16) __U); +} + +static __inline__ __m512 __attribute__ ((__always_inline__, __nodebug__)) +_mm512_maskz_xor_ps (__mmask16 __U, __m512 __A, __m512 __B) { + return (__m512) __builtin_ia32_xorps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U); +} + +static __inline__ __m512d __attribute__ ((__always_inline__, __nodebug__)) +_mm512_or_pd (__m512d __A, __m512d __B) { + return (__m512d) ((__v8di) __A | (__v8di) __B); +} + +static __inline__ __m512d __attribute__ ((__always_inline__, __nodebug__)) +_mm512_mask_or_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) { + return (__m512d) __builtin_ia32_orpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) __W, + (__mmask8) __U); +} + +static __inline__ __m512d __attribute__ 
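/* [Editor's note: the DQ "bitwise on floating point" operations are pure
 * bit manipulation; the vector is reinterpreted as __v8di or __v16si,
 * operated on, and cast back, with no FP arithmetic involved. The classic
 * application is sign-bit surgery, e.g. this illustrative helper (not
 * from the header) negating every lane:
 *
 *   static __inline__ __m512d negate_pd(__m512d v) {
 *     return _mm512_xor_pd(v, _mm512_set1_pd(-0.0));  // flip sign bits
 *   }
 * ]
 */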
((__always_inline__, __nodebug__)) +_mm512_maskz_or_pd (__mmask8 __U, __m512d __A, __m512d __B) { + return (__m512d) __builtin_ia32_orpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m512 __attribute__ ((__always_inline__, __nodebug__)) +_mm512_or_ps (__m512 __A, __m512 __B) { + return (__m512) ((__v16si) __A | (__v16si) __B); +} + +static __inline__ __m512 __attribute__ ((__always_inline__, __nodebug__)) +_mm512_mask_or_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) { + return (__m512) __builtin_ia32_orps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __W, + (__mmask16) __U); +} + +static __inline__ __m512 __attribute__ ((__always_inline__, __nodebug__)) +_mm512_maskz_or_ps (__mmask16 __U, __m512 __A, __m512 __B) { + return (__m512) __builtin_ia32_orps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U); +} + +static __inline__ __m512d __attribute__ ((__always_inline__, __nodebug__)) +_mm512_and_pd (__m512d __A, __m512d __B) { + return (__m512d) ((__v8di) __A & (__v8di) __B); +} + +static __inline__ __m512d __attribute__ ((__always_inline__, __nodebug__)) +_mm512_mask_and_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) { + return (__m512d) __builtin_ia32_andpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) __W, + (__mmask8) __U); +} + +static __inline__ __m512d __attribute__ ((__always_inline__, __nodebug__)) +_mm512_maskz_and_pd (__mmask8 __U, __m512d __A, __m512d __B) { + return (__m512d) __builtin_ia32_andpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m512 __attribute__ ((__always_inline__, __nodebug__)) +_mm512_and_ps (__m512 __A, __m512 __B) { + return (__m512) ((__v16si) __A & (__v16si) __B); +} + +static __inline__ __m512 __attribute__ ((__always_inline__, __nodebug__)) +_mm512_mask_and_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) { + return (__m512) __builtin_ia32_andps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __W, + (__mmask16) __U); +} + +static __inline__ __m512 __attribute__ ((__always_inline__, __nodebug__)) +_mm512_maskz_and_ps (__mmask16 __U, __m512 __A, __m512 __B) { + return (__m512) __builtin_ia32_andps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U); +} + +static __inline__ __m512d __attribute__ ((__always_inline__, __nodebug__)) +_mm512_andnot_pd (__m512d __A, __m512d __B) { + return (__m512d) __builtin_ia32_andnpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) -1); +} + +static __inline__ __m512d __attribute__ ((__always_inline__, __nodebug__)) +_mm512_mask_andnot_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) { + return (__m512d) __builtin_ia32_andnpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) __W, + (__mmask8) __U); +} + +static __inline__ __m512d __attribute__ ((__always_inline__, __nodebug__)) +_mm512_maskz_andnot_pd (__mmask8 __U, __m512d __A, __m512d __B) { + return (__m512d) __builtin_ia32_andnpd512_mask ((__v8df) __A, + (__v8df) __B, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m512 __attribute__ ((__always_inline__, __nodebug__)) +_mm512_andnot_ps (__m512 __A, __m512 __B) { + return (__m512) __builtin_ia32_andnps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) -1); +} + +static __inline__ __m512 __attribute__ ((__always_inline__, __nodebug__)) 
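/* [Editor's note: as in their SSE/AVX ancestors, the andnot operations
 * compute (~A) & B; the complement applies to the first operand. The
 * canonical idiom clears sign bits:
 *
 *   __m512d magnitude = _mm512_andnot_pd(_mm512_set1_pd(-0.0), x);
 * ]
 */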
+_mm512_mask_andnot_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) { + return (__m512) __builtin_ia32_andnps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) __W, + (__mmask16) __U); +} + +static __inline__ __m512 __attribute__ ((__always_inline__, __nodebug__)) +_mm512_maskz_andnot_ps (__mmask16 __U, __m512 __A, __m512 __B) { + return (__m512) __builtin_ia32_andnps512_mask ((__v16sf) __A, + (__v16sf) __B, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U); +} + +#endif diff --git a/contrib/llvm/tools/clang/lib/Headers/avx512erintrin.h b/contrib/llvm/tools/clang/lib/Headers/avx512erintrin.h index 1a5ea15..57c61aa 100644 --- a/contrib/llvm/tools/clang/lib/Headers/avx512erintrin.h +++ b/contrib/llvm/tools/clang/lib/Headers/avx512erintrin.h @@ -28,85 +28,259 @@ #define __AVX512ERINTRIN_H +// exp2a23 +#define _mm512_exp2a23_round_pd(A, R) __extension__ ({ \ + (__m512d)__builtin_ia32_exp2pd_mask((__v8df)(__m512d)(A), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)-1, (R)); }) + +#define _mm512_mask_exp2a23_round_pd(S, M, A, R) __extension__ ({ \ + (__m512d)__builtin_ia32_exp2pd_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(S), \ + (__mmask8)(M), (R)); }) + +#define _mm512_maskz_exp2a23_round_pd(M, A, R) __extension__ ({ \ + (__m512d)__builtin_ia32_exp2pd_mask((__v8df)(__m512d)(A), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)(M), (R)); }) + +#define _mm512_exp2a23_pd(A) \ + _mm512_exp2a23_round_pd((A), _MM_FROUND_CUR_DIRECTION) + +#define _mm512_mask_exp2a23_pd(S, M, A) \ + _mm512_mask_exp2a23_round_pd((S), (M), (A), _MM_FROUND_CUR_DIRECTION) + +#define _mm512_maskz_exp2a23_pd(M, A) \ + _mm512_maskz_exp2a23_round_pd((M), (A), _MM_FROUND_CUR_DIRECTION) + +#define _mm512_exp2a23_round_ps(A, R) __extension__ ({ \ + (__m512)__builtin_ia32_exp2ps_mask((__v16sf)(__m512)(A), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask8)-1, (R)); }) + +#define _mm512_mask_exp2a23_round_ps(S, M, A, R) __extension__ ({ \ + (__m512)__builtin_ia32_exp2ps_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(S), \ + (__mmask8)(M), (R)); }) + +#define _mm512_maskz_exp2a23_round_ps(M, A, R) __extension__ ({ \ + (__m512)__builtin_ia32_exp2ps_mask((__v16sf)(__m512)(A), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask8)(M), (R)); }) + +#define _mm512_exp2a23_ps(A) \ + _mm512_exp2a23_round_ps((A), _MM_FROUND_CUR_DIRECTION) + +#define _mm512_mask_exp2a23_ps(S, M, A) \ + _mm512_mask_exp2a23_round_ps((S), (M), (A), _MM_FROUND_CUR_DIRECTION) + +#define _mm512_maskz_exp2a23_ps(M, A) \ + _mm512_maskz_exp2a23_round_ps((M), (A), _MM_FROUND_CUR_DIRECTION) + // rsqrt28 -static __inline__ __m512d __attribute__((__always_inline__, __nodebug__)) -_mm512_rsqrt28_round_pd (__m512d __A, int __R) -{ - return (__m512d)__builtin_ia32_rsqrt28pd_mask ((__v8df)__A, - (__v8df)_mm512_setzero_pd(), - (__mmask8)-1, - __R); -} -static __inline__ __m512 __attribute__((__always_inline__, __nodebug__)) -_mm512_rsqrt28_round_ps(__m512 __A, int __R) -{ - return (__m512)__builtin_ia32_rsqrt28ps_mask ((__v16sf)__A, - (__v16sf)_mm512_setzero_ps(), - (__mmask16)-1, - __R); -} - -static __inline__ __m128 __attribute__((__always_inline__, __nodebug__)) -_mm_rsqrt28_round_ss(__m128 __A, __m128 __B, int __R) -{ - return (__m128) __builtin_ia32_rsqrt28ss_mask ((__v4sf) __A, - (__v4sf) __B, - (__v4sf) - _mm_setzero_ps (), - (__mmask8) -1, - __R); -} - -static __inline__ __m128d __attribute__((__always_inline__, __nodebug__)) -_mm_rsqrt28_round_sd (__m128d __A, __m128d __B, int __R) -{ - return (__m128d) __builtin_ia32_rsqrt28sd_mask ((__v2df) __A, - 
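/* [Editor's note: the conversion from static inline functions to macros in
 * this file is forced by the rounding argument. The underlying builtins
 * require the rounding mode to be an integer constant expression, which a
 * plain "int __R" function parameter cannot guarantee, while a macro keeps
 * the caller's constant visible. Call sites are unchanged, e.g.:
 *
 *   __m512d r = _mm512_rsqrt28_round_pd(x, _MM_FROUND_CUR_DIRECTION);
 * ]
 */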
(__v2df) __B, - (__v2df) - _mm_setzero_pd (), - (__mmask8) -1, - __R); -} +#define _mm512_rsqrt28_round_pd(A, R) __extension__ ({ \ + (__m512d)__builtin_ia32_rsqrt28pd_mask((__v8df)(__m512d)(A), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)-1, (R)); }) + +#define _mm512_mask_rsqrt28_round_pd(S, M, A, R) __extension__ ({ \ + (__m512d)__builtin_ia32_rsqrt28pd_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(S), \ + (__mmask8)(M), (R)); }) + +#define _mm512_maskz_rsqrt28_round_pd(M, A, R) __extension__ ({ \ + (__m512d)__builtin_ia32_rsqrt28pd_mask((__v8df)(__m512d)(A), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)(M), (R)); }) + +#define _mm512_rsqrt28_pd(A) \ + _mm512_rsqrt28_round_pd((A), _MM_FROUND_CUR_DIRECTION) + +#define _mm512_mask_rsqrt28_pd(S, M, A) \ + _mm512_mask_rsqrt28_round_pd((S), (M), (A), _MM_FROUND_CUR_DIRECTION) + +#define _mm512_maskz_rsqrt28_pd(M, A) \ + _mm512_maskz_rsqrt28_round_pd((M), (A), _MM_FROUND_CUR_DIRECTION) + +#define _mm512_rsqrt28_round_ps(A, R) __extension__ ({ \ + (__m512)__builtin_ia32_rsqrt28ps_mask((__v16sf)(__m512)(A), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)-1, (R)); }) + +#define _mm512_mask_rsqrt28_round_ps(S, M, A, R) __extension__ ({ \ + (__m512)__builtin_ia32_rsqrt28ps_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(S), \ + (__mmask16)(M), (R)); }) + +#define _mm512_maskz_rsqrt28_round_ps(M, A, R) __extension__ ({ \ + (__m512)__builtin_ia32_rsqrt28ps_mask((__v16sf)(__m512)(A), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)(M), (R)); }) + +#define _mm512_rsqrt28_ps(A) \ + _mm512_rsqrt28_round_ps((A), _MM_FROUND_CUR_DIRECTION) + +#define _mm512_mask_rsqrt28_ps(S, M, A) \ + _mm512_mask_rsqrt28_round_ps((S), (M), A, _MM_FROUND_CUR_DIRECTION) + +#define _mm512_maskz_rsqrt28_ps(M, A) \ + _mm512_maskz_rsqrt28_round_ps((M), (A), _MM_FROUND_CUR_DIRECTION) + +#define _mm_rsqrt28_round_ss(A, B, R) __extension__ ({ \ + (__m128)__builtin_ia32_rsqrt28ss_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)-1, (R)); }) + +#define _mm_mask_rsqrt28_round_ss(S, M, A, B, R) __extension__ ({ \ + (__m128)__builtin_ia32_rsqrt28ss_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)(__m128)(S), \ + (__mmask8)(M), (R)); }) + +#define _mm_maskz_rsqrt28_round_ss(M, A, B, R) __extension__ ({ \ + (__m128)__builtin_ia32_rsqrt28ss_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(M), (R)); }) + +#define _mm_rsqrt28_ss(A, B) \ + _mm_rsqrt28_round_ss((A), (B), _MM_FROUND_CUR_DIRECTION) + +#define _mm_mask_rsqrt28_ss(S, M, A, B) \ + _mm_mask_rsqrt28_round_ss((S), (M), (A), (B), _MM_FROUND_CUR_DIRECTION) + +#define _mm_maskz_rsqrt28_ss(M, A, B) \ + _mm_maskz_rsqrt28_round_ss((M), (A), (B), _MM_FROUND_CUR_DIRECTION) +#define _mm_rsqrt28_round_sd(A, B, R) __extension__ ({ \ + (__m128d)__builtin_ia32_rsqrt28sd_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)-1, (R)); }) + +#define _mm_mask_rsqrt28_round_sd(S, M, A, B, R) __extension__ ({ \ + (__m128d)__builtin_ia32_rsqrt28sd_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)(__m128d)(S), \ + (__mmask8)(M), (R)); }) + +#define _mm_maskz_rsqrt28_round_sd(M, A, B, R) __extension__ ({ \ + (__m128d)__builtin_ia32_rsqrt28sd_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(M), (R)); }) + +#define _mm_rsqrt28_sd(A, B) \ + _mm_rsqrt28_round_sd((A), (B), _MM_FROUND_CUR_DIRECTION) + +#define _mm_mask_rsqrt28_sd(S, M, 
A, B) \ + _mm_mask_rsqrt28_round_sd((S), (M), (A), (B), _MM_FROUND_CUR_DIRECTION) + +#define _mm_maskz_rsqrt28_sd(M, A, B) \ + _mm_mask_rsqrt28_round_sd((M), (A), (B), _MM_FROUND_CUR_DIRECTION) // rcp28 -static __inline__ __m512d __attribute__((__always_inline__, __nodebug__)) -_mm512_rcp28_round_pd (__m512d __A, int __R) -{ - return (__m512d)__builtin_ia32_rcp28pd_mask ((__v8df)__A, - (__v8df)_mm512_setzero_pd(), - (__mmask8)-1, - __R); -} - -static __inline__ __m512 __attribute__((__always_inline__, __nodebug__)) -_mm512_rcp28_round_ps (__m512 __A, int __R) -{ - return (__m512)__builtin_ia32_rcp28ps_mask ((__v16sf)__A, - (__v16sf)_mm512_setzero_ps (), - (__mmask16)-1, - __R); -} - -static __inline__ __m128 __attribute__((__always_inline__, __nodebug__)) -_mm_rcp28_round_ss (__m128 __A, __m128 __B, int __R) -{ - return (__m128) __builtin_ia32_rcp28ss_mask ((__v4sf) __A, - (__v4sf) __B, - (__v4sf) - _mm_setzero_ps (), - (__mmask8) -1, - __R); -} -static __inline__ __m128d __attribute__((__always_inline__, __nodebug__)) -_mm_rcp28_round_sd (__m128d __A, __m128d __B, int __R) -{ - return (__m128d) __builtin_ia32_rcp28sd_mask ((__v2df) __A, - (__v2df) __B, - (__v2df) - _mm_setzero_pd (), - (__mmask8) -1, - __R); -} +#define _mm512_rcp28_round_pd(A, R) __extension__ ({ \ + (__m512d)__builtin_ia32_rcp28pd_mask((__v8df)(__m512d)(A), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)-1, (R)); }) + +#define _mm512_mask_rcp28_round_pd(S, M, A, R) __extension__ ({ \ + (__m512d)__builtin_ia32_rcp28pd_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(S), \ + (__mmask8)(M), (R)); }) + +#define _mm512_maskz_rcp28_round_pd(M, A, R) __extension__ ({ \ + (__m512d)__builtin_ia32_rcp28pd_mask((__v8df)(__m512d)(A), \ + (__v8df)_mm512_setzero_pd(), \ + (__mmask8)(M), (R)); }) + +#define _mm512_rcp28_pd(A) \ + _mm512_rcp28_round_pd((A), _MM_FROUND_CUR_DIRECTION) + +#define _mm512_mask_rcp28_pd(S, M, A) \ + _mm512_mask_rcp28_round_pd((S), (M), (A), _MM_FROUND_CUR_DIRECTION) + +#define _mm512_maskz_rcp28_pd(M, A) \ + _mm512_maskz_rcp28_round_pd((M), (A), _MM_FROUND_CUR_DIRECTION) + +#define _mm512_rcp28_round_ps(A, R) __extension__ ({ \ + (__m512)__builtin_ia32_rcp28ps_mask((__v16sf)(__m512)(A), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)-1, (R)); }) + +#define _mm512_mask_rcp28_round_ps(S, M, A, R) __extension__ ({ \ + (__m512)__builtin_ia32_rcp28ps_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(S), \ + (__mmask16)(M), (R)); }) + +#define _mm512_maskz_rcp28_round_ps(M, A, R) __extension__ ({ \ + (__m512)__builtin_ia32_rcp28ps_mask((__v16sf)(__m512)(A), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)(M), (R)); }) + +#define _mm512_rcp28_ps(A) \ + _mm512_rcp28_round_ps((A), _MM_FROUND_CUR_DIRECTION) + +#define _mm512_mask_rcp28_ps(S, M, A) \ + _mm512_mask_rcp28_round_ps((S), (M), (A), _MM_FROUND_CUR_DIRECTION) + +#define _mm512_maskz_rcp28_ps(M, A) \ + _mm512_maskz_rcp28_round_ps((M), (A), _MM_FROUND_CUR_DIRECTION) + +#define _mm_rcp28_round_ss(A, B, R) __extension__ ({ \ + (__m128)__builtin_ia32_rcp28ss_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)-1, (R)); }) + +#define _mm_mask_rcp28_round_ss(S, M, A, B, R) __extension__ ({ \ + (__m128)__builtin_ia32_rcp28ss_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)(__m128)(S), \ + (__mmask8)(M), (R)); }) + +#define _mm_maskz_rcp28_round_ss(M, A, B, R) __extension__ ({ \ + (__m128)__builtin_ia32_rcp28ss_mask((__v4sf)(__m128)(A), \ + (__v4sf)(__m128)(B), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8)(M), 
(R)); }) + +#define _mm_rcp28_ss(A, B) \ + _mm_rcp28_round_ss((A), (B), _MM_FROUND_CUR_DIRECTION) + +#define _mm_mask_rcp28_ss(S, M, A, B) \ + _mm_mask_rcp28_round_ss((S), (M), (A), (B), _MM_FROUND_CUR_DIRECTION) + +#define _mm_maskz_rcp28_ss(M, A, B) \ + _mm_maskz_rcp28_round_ss((M), (A), (B), _MM_FROUND_CUR_DIRECTION) + +#define _mm_rcp28_round_sd(A, B, R) __extension__ ({ \ + (__m128d)__builtin_ia32_rcp28sd_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)-1, (R)); }) + +#define _mm_mask_rcp28_round_sd(S, M, A, B, R) __extension__ ({ \ + (__m128d)__builtin_ia32_rcp28sd_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)(__m128d)(S), \ + (__mmask8)(M), (R)); }) + +#define _mm_maskz_rcp28_round_sd(M, A, B, R) __extension__ ({ \ + (__m128d)__builtin_ia32_rcp28sd_mask((__v2df)(__m128d)(A), \ + (__v2df)(__m128d)(B), \ + (__v2df)_mm_setzero_pd(), \ + (__mmask8)(M), (R)); }) + +#define _mm_rcp28_sd(A, B) \ + _mm_rcp28_round_sd((A), (B), _MM_FROUND_CUR_DIRECTION) + +#define _mm_mask_rcp28_sd(S, M, A, B) \ + _mm_mask_rcp28_round_sd((S), (M), (A), (B), _MM_FROUND_CUR_DIRECTION) + +#define _mm_maskz_rcp28_sd(M, A, B) \ + _mm_maskz_rcp28_round_sd((M), (A), (B), _MM_FROUND_CUR_DIRECTION) #endif // __AVX512ERINTRIN_H diff --git a/contrib/llvm/tools/clang/lib/Headers/avx512fintrin.h b/contrib/llvm/tools/clang/lib/Headers/avx512fintrin.h index 9c80710..d299704 100644 --- a/contrib/llvm/tools/clang/lib/Headers/avx512fintrin.h +++ b/contrib/llvm/tools/clang/lib/Headers/avx512fintrin.h @@ -162,6 +162,224 @@ _mm512_castps512_ps128(__m512 __a) return __builtin_shufflevector(__a, __a, 0, 1, 2, 3); } +/* Bitwise operators */ +static __inline__ __m512i __attribute__((__always_inline__, __nodebug__)) +_mm512_and_epi32(__m512i __a, __m512i __b) +{ + return __a & __b; +} + +static __inline__ __m512i __attribute__((__always_inline__, __nodebug__)) +_mm512_mask_and_epi32(__m512i __src, __mmask16 __k, __m512i __a, __m512i __b) +{ + return (__m512i) __builtin_ia32_pandd512_mask((__v16si) __a, + (__v16si) __b, + (__v16si) __src, + (__mmask16) __k); +} +static __inline__ __m512i __attribute__((__always_inline__, __nodebug__)) +_mm512_maskz_and_epi32(__mmask16 __k, __m512i __a, __m512i __b) +{ + return (__m512i) __builtin_ia32_pandd512_mask((__v16si) __a, + (__v16si) __b, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) __k); +} + +static __inline__ __m512i __attribute__((__always_inline__, __nodebug__)) +_mm512_and_epi64(__m512i __a, __m512i __b) +{ + return __a & __b; +} + +static __inline__ __m512i __attribute__((__always_inline__, __nodebug__)) +_mm512_mask_and_epi64(__m512i __src, __mmask8 __k, __m512i __a, __m512i __b) +{ + return (__m512i) __builtin_ia32_pandq512_mask ((__v8di) __a, + (__v8di) __b, + (__v8di) __src, + (__mmask8) __k); +} +static __inline__ __m512i __attribute__((__always_inline__, __nodebug__)) +_mm512_maskz_and_epi64(__mmask8 __k, __m512i __a, __m512i __b) +{ + return (__m512i) __builtin_ia32_pandq512_mask ((__v8di) __a, + (__v8di) __b, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __k); +} + +static __inline__ __m512i __attribute__((__always_inline__, __nodebug__)) +_mm512_andnot_epi32 (__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pandnd512_mask ((__v16si) __A, + (__v16si) __B, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) -1); +} + +static __inline__ __m512i __attribute__((__always_inline__, __nodebug__)) +_mm512_mask_andnot_epi32 (__m512i __W, __mmask16 __U, __m512i __A, __m512i __B) +{ + return 
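/* [Editor's note: the unmasked integer forms lean on Clang's native vector
 * operators (__a & __b, __a | __b, __a ^ __b lower to a single 512-bit
 * logical op), while the masked forms must go through the *_mask builtins
 * so the write mask survives into instruction selection. For example:
 *
 *   __m512i r = _mm512_mask_and_epi32(src, 0x00FF, a, b);
 *   // lanes 0..7 receive a & b; lanes 8..15 keep src
 * ]
 */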
(__m512i) __builtin_ia32_pandnd512_mask ((__v16si) __A, + (__v16si) __B, + (__v16si) __W, + (__mmask16) __U); +} + +static __inline__ __m512i __attribute__((__always_inline__, __nodebug__)) +_mm512_maskz_andnot_epi32 (__mmask16 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pandnd512_mask ((__v16si) __A, + (__v16si) __B, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) __U); +} + +static __inline__ __m512i __attribute__((__always_inline__, __nodebug__)) +_mm512_andnot_epi64 (__m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pandnq512_mask ((__v8di) __A, + (__v8di) __B, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) -1); +} + +static __inline__ __m512i __attribute__((__always_inline__, __nodebug__)) +_mm512_mask_andnot_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pandnq512_mask ((__v8di) __A, + (__v8di) __B, + (__v8di) __W, __U); +} + +static __inline__ __m512i __attribute__((__always_inline__, __nodebug__)) +_mm512_maskz_andnot_epi64 (__mmask8 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pandnq512_mask ((__v8di) __A, + (__v8di) __B, + (__v8di) + _mm512_setzero_si512 (), + __U); +} +static __inline__ __m512i __attribute__((__always_inline__, __nodebug__)) +_mm512_or_epi32(__m512i __a, __m512i __b) +{ + return __a | __b; +} + +static __inline__ __m512i __attribute__((__always_inline__, __nodebug__)) +_mm512_mask_or_epi32(__m512i __src, __mmask16 __k, __m512i __a, __m512i __b) +{ + return (__m512i) __builtin_ia32_pord512_mask((__v16si) __a, + (__v16si) __b, + (__v16si) __src, + (__mmask16) __k); +} +static __inline__ __m512i __attribute__((__always_inline__, __nodebug__)) +_mm512_maskz_or_epi32(__mmask16 __k, __m512i __a, __m512i __b) +{ + return (__m512i) __builtin_ia32_pord512_mask((__v16si) __a, + (__v16si) __b, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) __k); +} + +static __inline__ __m512i __attribute__((__always_inline__, __nodebug__)) +_mm512_or_epi64(__m512i __a, __m512i __b) +{ + return __a | __b; +} + +static __inline__ __m512i __attribute__((__always_inline__, __nodebug__)) +_mm512_mask_or_epi64(__m512i __src, __mmask8 __k, __m512i __a, __m512i __b) +{ + return (__m512i) __builtin_ia32_porq512_mask ((__v8di) __a, + (__v8di) __b, + (__v8di) __src, + (__mmask8) __k); +} +static __inline__ __m512i __attribute__((__always_inline__, __nodebug__)) +_mm512_maskz_or_epi64(__mmask8 __k, __m512i __a, __m512i __b) +{ + return (__m512i) __builtin_ia32_porq512_mask ((__v8di) __a, + (__v8di) __b, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __k); +} + +static __inline__ __m512i __attribute__((__always_inline__, __nodebug__)) +_mm512_xor_epi32(__m512i __a, __m512i __b) +{ + return __a ^ __b; +} + +static __inline__ __m512i __attribute__((__always_inline__, __nodebug__)) +_mm512_mask_xor_epi32(__m512i __src, __mmask16 __k, __m512i __a, __m512i __b) +{ + return (__m512i) __builtin_ia32_pxord512_mask((__v16si) __a, + (__v16si) __b, + (__v16si) __src, + (__mmask16) __k); +} +static __inline__ __m512i __attribute__((__always_inline__, __nodebug__)) +_mm512_maskz_xor_epi32(__mmask16 __k, __m512i __a, __m512i __b) +{ + return (__m512i) __builtin_ia32_pxord512_mask((__v16si) __a, + (__v16si) __b, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) __k); +} + +static __inline__ __m512i __attribute__((__always_inline__, __nodebug__)) +_mm512_xor_epi64(__m512i __a, __m512i __b) +{ + return __a ^ __b; +} + +static __inline__ __m512i __attribute__((__always_inline__, __nodebug__))
+_mm512_mask_xor_epi64(__m512i __src, __mmask8 __k, __m512i __a, __m512i __b) +{ + return (__m512i) __builtin_ia32_pxorq512_mask ((__v8di) __a, + (__v8di) __b, + (__v8di) __src, + (__mmask8) __k); +} +static __inline__ __m512i __attribute__((__always_inline__, __nodebug__)) +_mm512_maskz_xor_epi64(__mmask8 __k, __m512i __a, __m512i __b) +{ + return (__m512i) __builtin_ia32_pxorq512_mask ((__v8di) __a, + (__v8di) __b, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __k); +} + +static __inline__ __m512i __attribute__((__always_inline__, __nodebug__)) +_mm512_and_si512(__m512i __a, __m512i __b) +{ + return __a & __b; +} + +static __inline__ __m512i __attribute__((__always_inline__, __nodebug__)) +_mm512_or_si512(__m512i __a, __m512i __b) +{ + return __a | __b; +} + +static __inline__ __m512i __attribute__((__always_inline__, __nodebug__)) +_mm512_xor_si512(__m512i __a, __m512i __b) +{ + return __a ^ __b; +} /* Arithmetic */ static __inline __m512d __attribute__((__always_inline__, __nodebug__)) @@ -200,6 +418,106 @@ _mm512_sub_ps(__m512 __a, __m512 __b) return __a - __b; } +static __inline__ __m512i __attribute__ ((__always_inline__, __nodebug__)) +_mm512_add_epi64 (__m512i __A, __m512i __B) +{ + return (__m512i) ((__v8di) __A + (__v8di) __B); +} + +static __inline__ __m512i __attribute__ ((__always_inline__, __nodebug__)) +_mm512_mask_add_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_paddq512_mask ((__v8di) __A, + (__v8di) __B, + (__v8di) __W, + (__mmask8) __U); +} + +static __inline__ __m512i __attribute__ ((__always_inline__, __nodebug__)) +_mm512_maskz_add_epi64 (__mmask8 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_paddq512_mask ((__v8di) __A, + (__v8di) __B, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __U); +} + +static __inline__ __m512i __attribute__ ((__always_inline__, __nodebug__)) +_mm512_sub_epi64 (__m512i __A, __m512i __B) +{ + return (__m512i) ((__v8di) __A - (__v8di) __B); +} + +static __inline__ __m512i __attribute__ ((__always_inline__, __nodebug__)) +_mm512_mask_sub_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_psubq512_mask ((__v8di) __A, + (__v8di) __B, + (__v8di) __W, + (__mmask8) __U); +} + +static __inline__ __m512i __attribute__ ((__always_inline__, __nodebug__)) +_mm512_maskz_sub_epi64 (__mmask8 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_psubq512_mask ((__v8di) __A, + (__v8di) __B, + (__v8di) + _mm512_setzero_si512 (), + (__mmask8) __U); +} + +static __inline__ __m512i __attribute__ ((__always_inline__, __nodebug__)) +_mm512_add_epi32 (__m512i __A, __m512i __B) +{ + return (__m512i) ((__v16si) __A + (__v16si) __B); +} + +static __inline__ __m512i __attribute__ ((__always_inline__, __nodebug__)) +_mm512_mask_add_epi32 (__m512i __W, __mmask16 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_paddd512_mask ((__v16si) __A, + (__v16si) __B, + (__v16si) __W, + (__mmask16) __U); +} + +static __inline__ __m512i __attribute__ ((__always_inline__, __nodebug__)) +_mm512_maskz_add_epi32 (__mmask16 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_paddd512_mask ((__v16si) __A, + (__v16si) __B, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) __U); +} + +static __inline__ __m512i __attribute__ ((__always_inline__, __nodebug__)) +_mm512_sub_epi32 (__m512i __A, __m512i __B) +{ + return (__m512i) ((__v16si) __A - (__v16si) __B); +} + +static __inline__ __m512i __attribute__ 
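Every masked integer op above follows the same two conventions; a short sketch (assuming -mavx512f) contrasting merge-masking with zero-masking:

#include <immintrin.h>

/* Merge-masking: lane i gets (a & ~b... a & b)[i] where m has a 1 bit, else src[i]. */
static __m512i and_merge(__m512i src, __mmask16 m, __m512i a, __m512i b)
{
  return _mm512_mask_and_epi32(src, m, a, b);
}

/* Zero-masking: unselected lanes become 0 rather than a passthrough value. */
static __m512i add_zero(__mmask8 m, __m512i a, __m512i b)
{
  return _mm512_maskz_add_epi64(m, a, b);
}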
((__always_inline__, __nodebug__)) +_mm512_mask_sub_epi32 (__m512i __W, __mmask16 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_psubd512_mask ((__v16si) __A, + (__v16si) __B, + (__v16si) __W, + (__mmask16) __U); +} + +static __inline__ __m512i __attribute__ ((__always_inline__, __nodebug__)) +_mm512_maskz_sub_epi32 (__mmask16 __U, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_psubd512_mask ((__v16si) __A, + (__v16si) __B, + (__v16si) + _mm512_setzero_si512 (), + (__mmask16) __U); +} + static __inline__ __m512d __attribute__((__always_inline__, __nodebug__)) _mm512_max_pd(__m512d __A, __m512d __B) { @@ -337,6 +655,24 @@ _mm512_mul_epi32(__m512i __X, __m512i __Y) } static __inline __m512i __attribute__ ((__always_inline__, __nodebug__)) +_mm512_mask_mul_epi32 (__m512i __W, __mmask8 __M, __m512i __X, __m512i __Y) +{ + return (__m512i) __builtin_ia32_pmuldq512_mask ((__v16si) __X, + (__v16si) __Y, + (__v8di) __W, __M); +} + +static __inline __m512i __attribute__ ((__always_inline__, __nodebug__)) +_mm512_maskz_mul_epi32 (__mmask8 __M, __m512i __X, __m512i __Y) +{ + return (__m512i) __builtin_ia32_pmuldq512_mask ((__v16si) __X, + (__v16si) __Y, + (__v8di) + _mm512_setzero_si512 (), + __M); +} + +static __inline __m512i __attribute__ ((__always_inline__, __nodebug__)) _mm512_mul_epu32(__m512i __X, __m512i __Y) { return (__m512i) __builtin_ia32_pmuludq512_mask ((__v16si) __X, @@ -346,6 +682,48 @@ _mm512_mul_epu32(__m512i __X, __m512i __Y) (__mmask8) -1); } +static __inline __m512i __attribute__ ((__always_inline__, __nodebug__)) +_mm512_mask_mul_epu32 (__m512i __W, __mmask8 __M, __m512i __X, __m512i __Y) +{ + return (__m512i) __builtin_ia32_pmuludq512_mask ((__v16si) __X, + (__v16si) __Y, + (__v8di) __W, __M); +} + +static __inline __m512i __attribute__ ((__always_inline__, __nodebug__)) +_mm512_maskz_mul_epu32 (__mmask8 __M, __m512i __X, __m512i __Y) +{ + return (__m512i) __builtin_ia32_pmuludq512_mask ((__v16si) __X, + (__v16si) __Y, + (__v8di) + _mm512_setzero_si512 (), + __M); +} + +static __inline __m512i __attribute__ ((__always_inline__, __nodebug__)) +_mm512_mullo_epi32 (__m512i __A, __m512i __B) +{ + return (__m512i) ((__v16si) __A * (__v16si) __B); +} + +static __inline __m512i __attribute__ ((__always_inline__, __nodebug__)) +_mm512_maskz_mullo_epi32 (__mmask16 __M, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pmulld512_mask ((__v16si) __A, + (__v16si) __B, + (__v16si) + _mm512_setzero_si512 (), + __M); +} + +static __inline __m512i __attribute__ ((__always_inline__, __nodebug__)) +_mm512_mask_mullo_epi32 (__m512i __W, __mmask16 __M, __m512i __A, __m512i __B) +{ + return (__m512i) __builtin_ia32_pmulld512_mask ((__v16si) __A, + (__v16si) __B, + (__v16si) __W, __M); +} + static __inline__ __m512d __attribute__((__always_inline__, __nodebug__)) _mm512_sqrt_pd(__m512d a) { @@ -492,20 +870,13 @@ _mm512_abs_epi32(__m512i __A) (__mmask16) -1); } -static __inline __m512 __attribute__ ((__always_inline__, __nodebug__)) -_mm512_roundscale_ps(__m512 __A, const int __imm) -{ - return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A, __imm, - (__v16sf) __A, -1, - _MM_FROUND_CUR_DIRECTION); -} -static __inline __m512d __attribute__ ((__always_inline__, __nodebug__)) -_mm512_roundscale_pd(__m512d __A, const int __imm) -{ - return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A, __imm, - (__v8df) __A, -1, - _MM_FROUND_CUR_DIRECTION); -} +#define _mm512_roundscale_ps(A, B) __extension__ ({ \ + 
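_mm512_mul_epi32 sign-extends the even 32-bit lanes into eight 64-bit products, while _mm512_mullo_epi32 keeps the low 32 bits of all sixteen products; a sketch of the masked forms added above (assuming -mavx512f):

#include <immintrin.h>

static __m512i widening_mul(__mmask8 m, __m512i x, __m512i y)
{
  /* Eight 64-bit products; lanes masked off in m are zeroed. */
  return _mm512_maskz_mul_epi32(m, x, y);
}

static __m512i low_mul(__m512i src, __mmask16 m, __m512i a, __m512i b)
{
  /* Sixteen 32-bit low halves, merged with src under m. */
  return _mm512_mask_mullo_epi32(src, m, a, b);
}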
(__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(A), (B), (__v16sf)(A), \ + -1, _MM_FROUND_CUR_DIRECTION); }) + +#define _mm512_roundscale_pd(A, B) __extension__ ({ \ + (__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(A), (B), (__v8df)(A), \ + -1, _MM_FROUND_CUR_DIRECTION); }) static __inline__ __m512d __attribute__((__always_inline__, __nodebug__)) _mm512_fmadd_pd(__m512d __A, __m512d __B, __m512d __C) @@ -613,25 +984,35 @@ _mm512_permutex2var_ps(__m512 __A, __m512i __I, __m512 __B) (__mmask16) -1); } -static __inline __m512i __attribute__ ((__always_inline__, __nodebug__)) -_mm512_valign_epi64(__m512i __A, __m512i __B, const int __I) -{ - return (__m512i) __builtin_ia32_alignq512_mask((__v8di)__A, - (__v8di)__B, - __I, - (__v8di)_mm512_setzero_si512(), - (__mmask8) -1); -} - -static __inline __m512i __attribute__ ((__always_inline__, __nodebug__)) -_mm512_valign_epi32(__m512i __A, __m512i __B, const int __I) -{ - return (__m512i)__builtin_ia32_alignd512_mask((__v16si)__A, - (__v16si)__B, - __I, - (__v16si)_mm512_setzero_si512(), - (__mmask16) -1); -} +#define _mm512_alignr_epi64(A, B, I) __extension__ ({ \ + (__m512i)__builtin_ia32_alignq512_mask((__v8di)(__m512i)(A), \ + (__v8di)(__m512i)(B), \ + (I), (__v8di)_mm512_setzero_si512(), \ + (__mmask8)-1); }) + +#define _mm512_alignr_epi32(A, B, I) __extension__ ({ \ + (__m512i)__builtin_ia32_alignd512_mask((__v16si)(__m512i)(A), \ + (__v16si)(__m512i)(B), \ + (I), (__v16si)_mm512_setzero_si512(), \ + (__mmask16)-1); }) + +/* Vector Extract */ + +#define _mm512_extractf64x4_pd(A, I) __extension__ ({ \ + __m512d __A = (A); \ + (__m256d) \ + __builtin_ia32_extractf64x4_mask((__v8df)__A, \ + (I), \ + (__v4df)_mm256_setzero_pd(), \ + (__mmask8) -1); }) + +#define _mm512_extractf32x4_ps(A, I) __extension__ ({ \ + __m512 __A = (A); \ + (__m128) \ + __builtin_ia32_extractf32x4_mask((__v16sf)__A, \ + (I), \ + (__v4sf)_mm_setzero_ps(), \ + (__mmask8) -1); }) /* Vector Blend */ @@ -669,22 +1050,37 @@ _mm512_mask_blend_epi32(__mmask16 __U, __m512i __A, __m512i __W) /* Compare */ -static __inline __mmask16 __attribute__ ((__always_inline__, __nodebug__)) -_mm512_cmp_ps_mask(__m512 a, __m512 b, const int p) -{ - return (__mmask16) __builtin_ia32_cmpps512_mask ((__v16sf) a, - (__v16sf) b, p, (__mmask16) -1, - _MM_FROUND_CUR_DIRECTION); -} +#define _mm512_cmp_round_ps_mask(A, B, P, R) __extension__ ({ \ + (__mmask16)__builtin_ia32_cmpps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (P), (__mmask16)-1, (R)); }) -static __inline __mmask8 __attribute__ ((__always_inline__, __nodebug__)) -_mm512_cmp_pd_mask(__m512d __X, __m512d __Y, const int __P) -{ - return (__mmask8) __builtin_ia32_cmppd512_mask ((__v8df) __X, - (__v8df) __Y, __P, - (__mmask8) -1, - _MM_FROUND_CUR_DIRECTION); -} +#define _mm512_mask_cmp_round_ps_mask(U, A, B, P, R) __extension__ ({ \ + (__mmask16)__builtin_ia32_cmpps512_mask((__v16sf)(__m512)(A), \ + (__v16sf)(__m512)(B), \ + (P), (__mmask16)(U), (R)); }) + +#define _mm512_cmp_ps_mask(A, B, P) \ + _mm512_cmp_round_ps_mask((A), (B), (P), _MM_FROUND_CUR_DIRECTION) + +#define _mm512_mask_cmp_ps_mask(U, A, B, P) \ + _mm512_mask_cmp_round_ps_mask((U), (A), (B), (P), _MM_FROUND_CUR_DIRECTION) + +#define _mm512_cmp_round_pd_mask(A, B, P, R) __extension__ ({ \ + (__mmask8)__builtin_ia32_cmppd512_mask((__v8df)(__m512d)(A), \ + (__v8df)(__m512d)(B), \ + (P), (__mmask8)-1, (R)); }) + +#define _mm512_mask_cmp_round_pd_mask(U, A, B, P, R) __extension__ ({ \ + (__mmask8)__builtin_ia32_cmppd512_mask((__v8df)(__m512d)(A), \ +
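The roundscale, alignr, and extract wrappers are macros rather than inline functions because the underlying builtins require their immediate operand to be an integer constant expression; a usage sketch (assuming -mavx512f):

#include <immintrin.h>

static __m512i shift_in_one_dword(__m512i a, __m512i b)
{
  /* Concatenate a (upper half) with b (lower half), shift right one 32-bit lane. */
  return _mm512_alignr_epi32(a, b, 1);
}

static __m256d upper_four(__m512d v)
{
  return _mm512_extractf64x4_pd(v, 1);  /* elements 4..7 */
}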
(__v8df)(__m512d)(B), \ + (P), (__mmask8)(U), (R)); }) + +#define _mm512_cmp_pd_mask(A, B, P) \ + _mm512_cmp_round_pd_mask((A), (B), (P), _MM_FROUND_CUR_DIRECTION) + +#define _mm512_mask_cmp_pd_mask(U, A, B, P) \ + _mm512_mask_cmp_round_pd_mask((U), (A), (B), (P), _MM_FROUND_CUR_DIRECTION) /* Conversion */ @@ -698,25 +1094,15 @@ _mm512_cvttps_epu32(__m512 __A) _MM_FROUND_CUR_DIRECTION); } -static __inline __m512 __attribute__ (( __always_inline__, __nodebug__)) -_mm512_cvt_roundepi32_ps(__m512i __A, const int __R) -{ - return (__m512) __builtin_ia32_cvtdq2ps512_mask ((__v16si) __A, - (__v16sf) - _mm512_setzero_ps (), - (__mmask16) -1, - __R); -} +#define _mm512_cvt_roundepi32_ps(A, R) __extension__ ({ \ + (__m512)__builtin_ia32_cvtdq2ps512_mask((__v16si)(A), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)-1, (R)); }) -static __inline __m512 __attribute__ (( __always_inline__, __nodebug__)) -_mm512_cvt_roundepu32_ps(__m512i __A, const int __R) -{ - return (__m512) __builtin_ia32_cvtudq2ps512_mask ((__v16si) __A, - (__v16sf) - _mm512_setzero_ps (), - (__mmask16) -1, - __R); -} +#define _mm512_cvt_roundepu32_ps(A, R) __extension__ ({ \ + (__m512)__builtin_ia32_cvtudq2ps512_mask((__v16si)(A), \ + (__v16sf)_mm512_setzero_ps(), \ + (__mmask16)-1, (R)); }) static __inline __m512d __attribute__ (( __always_inline__, __nodebug__)) _mm512_cvtepi32_pd(__m256i __A) @@ -735,25 +1121,16 @@ _mm512_cvtepu32_pd(__m256i __A) _mm512_setzero_pd (), (__mmask8) -1); } -static __inline __m256 __attribute__ (( __always_inline__, __nodebug__)) -_mm512_cvt_roundpd_ps(__m512d __A, const int __R) -{ - return (__m256) __builtin_ia32_cvtpd2ps512_mask ((__v8df) __A, - (__v8sf) - _mm256_setzero_ps (), - (__mmask8) -1, - __R); -} -static __inline __m256i __attribute__ ((__always_inline__, __nodebug__)) -_mm512_cvtps_ph(__m512 __A, const int __I) -{ - return (__m256i) __builtin_ia32_vcvtps2ph512_mask ((__v16sf) __A, - __I, - (__v16hi) - _mm256_setzero_si256 (), - -1); -} +#define _mm512_cvt_roundpd_ps(A, R) __extension__ ({ \ + (__m256)__builtin_ia32_cvtpd2ps512_mask((__v8df)(A), \ + (__v8sf)_mm256_setzero_ps(), \ + (__mmask8)-1, (R)); }) + +#define _mm512_cvtps_ph(A, I) __extension__ ({ \ + (__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(A), (I), \ + (__v16hi)_mm256_setzero_si256(), \ + -1); }) static __inline __m512 __attribute__ ((__always_inline__, __nodebug__)) _mm512_cvtph_ps(__m256i __A) @@ -783,61 +1160,35 @@ _mm512_cvttpd_epi32(__m512d a) _MM_FROUND_CUR_DIRECTION); } -static __inline __m256i __attribute__ ((__always_inline__, __nodebug__)) -_mm512_cvtt_roundpd_epi32(__m512d __A, const int __R) -{ - return (__m256i) __builtin_ia32_cvttpd2dq512_mask ((__v8df) __A, - (__v8si) - _mm256_setzero_si256 (), - (__mmask8) -1, - __R); -} -static __inline __m512i __attribute__ ((__always_inline__, __nodebug__)) -_mm512_cvtt_roundps_epi32(__m512 __A, const int __R) -{ - return (__m512i) __builtin_ia32_cvttps2dq512_mask ((__v16sf) __A, - (__v16si) - _mm512_setzero_si512 (), - (__mmask16) -1, - __R); -} +#define _mm512_cvtt_roundpd_epi32(A, R) __extension__ ({ \ + (__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df)(A), \ + (__v8si)_mm256_setzero_si256(), \ + (__mmask8)-1, (R)); }) -static __inline __m512i __attribute__ ((__always_inline__, __nodebug__)) -_mm512_cvt_roundps_epi32(__m512 __A, const int __R) -{ - return (__m512i) __builtin_ia32_cvtps2dq512_mask ((__v16sf) __A, - (__v16si) - _mm512_setzero_si512 (), - (__mmask16) -1, - __R); -} -static __inline __m256i __attribute__ ((__always_inline__, __nodebug__)) 
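A sketch of the predicate-based floating-point compares above; _CMP_LT_OS is the ordered, signaling less-than predicate already defined for AVX (assuming -mavx512f):

#include <immintrin.h>

static __mmask8 lanes_less(__m512d x, __m512d y)
{
  return _mm512_cmp_pd_mask(x, y, _CMP_LT_OS);
}

/* The _round_ form takes an extra rounding/SAE argument. */
static __mmask16 lanes_less_ps(__m512 x, __m512 y)
{
  return _mm512_cmp_round_ps_mask(x, y, _CMP_LT_OS, _MM_FROUND_CUR_DIRECTION);
}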
-_mm512_cvt_roundpd_epi32(__m512d __A, const int __R) -{ - return (__m256i) __builtin_ia32_cvtpd2dq512_mask ((__v8df) __A, - (__v8si) - _mm256_setzero_si256 (), - (__mmask8) -1, - __R); -} -static __inline __m512i __attribute__ ((__always_inline__, __nodebug__)) -_mm512_cvt_roundps_epu32(__m512 __A, const int __R) -{ - return (__m512i) __builtin_ia32_cvtps2udq512_mask ((__v16sf) __A, - (__v16si) - _mm512_setzero_si512 (), - (__mmask16) -1, - __R); -} -static __inline __m256i __attribute__ ((__always_inline__, __nodebug__)) -_mm512_cvt_roundpd_epu32(__m512d __A, const int __R) -{ - return (__m256i) __builtin_ia32_cvtpd2udq512_mask ((__v8df) __A, - (__v8si) - _mm256_setzero_si256 (), - (__mmask8) -1, - __R); -} +#define _mm512_cvtt_roundpd_epi32(A, R) __extension__ ({ \ + (__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df)(A), \ + (__v8si)_mm256_setzero_si256(), \ + (__mmask8)-1, (R)); }) + +#define _mm512_cvtt_roundps_epi32(A, R) __extension__ ({ \ + (__m512i)__builtin_ia32_cvttps2dq512_mask((__v16sf)(A), \ + (__v16si)_mm512_setzero_si512(), \ + (__mmask16)-1, (R)); }) + +#define _mm512_cvt_roundps_epi32(A, R) __extension__ ({ \ + (__m512i)__builtin_ia32_cvtps2dq512_mask((__v16sf)(A), \ + (__v16si)_mm512_setzero_si512(), \ + (__mmask16)-1, (R)); }) + +#define _mm512_cvt_roundpd_epi32(A, R) __extension__ ({ \ + (__m256i)__builtin_ia32_cvtpd2dq512_mask((__v8df)(A), \ + (__v8si)_mm256_setzero_si256(), \ + (__mmask8)-1, (R)); }) + +#define _mm512_cvt_roundps_epu32(A, R) __extension__ ({ \ + (__m512i)__builtin_ia32_cvtps2udq512_mask((__v16sf)(A), \ + (__v16si)_mm512_setzero_si512(), \ + (__mmask16)-1, (R)); }) + +#define _mm512_cvt_roundpd_epu32(A, R) __extension__ ({ \ + (__m256i)__builtin_ia32_cvtpd2udq512_mask((__v8df)(A), \ + (__v8si)_mm256_setzero_si256(), \ + (__mmask8) -1, (R)); }) /* Unpack and Interleave */ static __inline __m512d __attribute__((__always_inline__, __nodebug__)) @@ -928,12 +1279,30 @@ _mm512_maskz_loadu_pd(__mmask8 __U, void const *__P) (__mmask8) __U); } +static __inline __m512 __attribute__ ((__always_inline__, __nodebug__)) +_mm512_maskz_load_ps(__mmask16 __U, void const *__P) +{ + return (__m512) __builtin_ia32_loadaps512_mask ((const __v16sf *)__P, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) __U); +} + +static __inline __m512d __attribute__ ((__always_inline__, __nodebug__)) +_mm512_maskz_load_pd(__mmask8 __U, void const *__P) +{ + return (__m512d) __builtin_ia32_loadapd512_mask ((const __v8df *)__P, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) __U); +} + static __inline __m512d __attribute__((__always_inline__, __nodebug__)) _mm512_loadu_pd(double const *__p) { struct __loadu_pd { __m512d __v; - } __attribute__((packed, may_alias)); + } __attribute__((__packed__, __may_alias__)); return ((struct __loadu_pd*)__p)->__v; } @@ -942,10 +1311,28 @@ _mm512_loadu_ps(float const *__p) { struct __loadu_ps { __m512 __v; - } __attribute__((packed, may_alias)); + } __attribute__((__packed__, __may_alias__)); return ((struct __loadu_ps*)__p)->__v; } +static __inline __m512 __attribute__((__always_inline__, __nodebug__)) +_mm512_load_ps(float const *__p) +{ + return (__m512) __builtin_ia32_loadaps512_mask ((const __v16sf *)__p, + (__v16sf) + _mm512_setzero_ps (), + (__mmask16) -1); +} + +static __inline __m512d __attribute__((__always_inline__, __nodebug__)) +_mm512_load_pd(double const *__p) +{ + return (__m512d) __builtin_ia32_loadapd512_mask ((const __v8df *)__p, + (__v8df) + _mm512_setzero_pd (), + (__mmask8) -1); +} + /* SIMD store ops */ static __inline void __attribute__ ((__always_inline__, __nodebug__)) @@ -988,9 +1375,9 @@ _mm512_storeu_ps(void *__P, __m512 __A) } static __inline void __attribute__ ((__always_inline__, __nodebug__))
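A sketch of the new zero-masked aligned loads; masked-off elements are not read, which makes this a convenient way to handle a short tail (assumes p is 64-byte aligned and n <= 16, -mavx512f):

#include <immintrin.h>

static __m512 load_first_n(const float *p, unsigned n)
{
  __mmask16 m = (__mmask16)((1u << n) - 1);  /* low n bits set */
  return _mm512_maskz_load_ps(m, p);         /* unselected lanes read as 0.0f */
}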
-_mm512_store_ps(void *__P, __m512 __A) +_mm512_mask_store_pd(void *__P, __mmask8 __U, __m512d __A) { - *(__m512*)__P = __A; + __builtin_ia32_storeapd512_mask ((__v8df *)__P, (__v8df) __A, (__mmask8) __U); } static __inline void __attribute__ ((__always_inline__, __nodebug__)) @@ -999,6 +1386,19 @@ _mm512_store_pd(void *__P, __m512d __A) *(__m512d*)__P = __A; } +static __inline void __attribute__ ((__always_inline__, __nodebug__)) +_mm512_mask_store_ps(void *__P, __mmask16 __U, __m512 __A) +{ + __builtin_ia32_storeaps512_mask ((__v16sf *)__P, (__v16sf) __A, + (__mmask16) __U); +} + +static __inline void __attribute__ ((__always_inline__, __nodebug__)) +_mm512_store_ps(void *__P, __m512 __A) +{ + *(__m512*)__P = __A; +} + /* Mask ops */ static __inline __mmask16 __attribute__ ((__always_inline__, __nodebug__)) @@ -1021,6 +1421,18 @@ _mm512_mask_cmpeq_epi32_mask(__mmask16 __u, __m512i __a, __m512i __b) { __u); } +static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__)) +_mm512_cmpeq_epu32_mask(__m512i __a, __m512i __b) { + return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__a, (__v16si)__b, 0, + (__mmask16)-1); +} + +static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__)) +_mm512_mask_cmpeq_epu32_mask(__mmask16 __u, __m512i __a, __m512i __b) { + return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__a, (__v16si)__b, 0, + __u); +} + static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) _mm512_mask_cmpeq_epi64_mask(__mmask8 __u, __m512i __a, __m512i __b) { return (__mmask8)__builtin_ia32_pcmpeqq512_mask((__v8di)__a, (__v8di)__b, @@ -1033,4 +1445,303 @@ _mm512_cmpeq_epi64_mask(__m512i __a, __m512i __b) { (__mmask8)-1); } +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm512_cmpeq_epu64_mask(__m512i __a, __m512i __b) { + return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__a, (__v8di)__b, 0, + (__mmask8)-1); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm512_mask_cmpeq_epu64_mask(__mmask8 __u, __m512i __a, __m512i __b) { + return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__a, (__v8di)__b, 0, + __u); +} + +static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__)) +_mm512_cmpge_epi32_mask(__m512i __a, __m512i __b) { + return (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)__a, (__v16si)__b, 5, + (__mmask16)-1); +} + +static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__)) +_mm512_mask_cmpge_epi32_mask(__mmask16 __u, __m512i __a, __m512i __b) { + return (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)__a, (__v16si)__b, 5, + __u); +} + +static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__)) +_mm512_cmpge_epu32_mask(__m512i __a, __m512i __b) { + return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__a, (__v16si)__b, 5, + (__mmask16)-1); +} + +static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__)) +_mm512_mask_cmpge_epu32_mask(__mmask16 __u, __m512i __a, __m512i __b) { + return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__a, (__v16si)__b, 5, + __u); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm512_cmpge_epi64_mask(__m512i __a, __m512i __b) { + return (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)__a, (__v8di)__b, 5, + (__mmask8)-1); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm512_mask_cmpge_epi64_mask(__mmask8 __u, __m512i __a, __m512i __b) { + return 
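The matching masked stores write only the selected lanes; a sketch (assuming -mavx512f and a 64-byte-aligned destination):

#include <immintrin.h>

static void store_selected(double *dst, __mmask8 m, __m512d v)
{
  /* Lanes with a 0 bit in m leave the corresponding memory untouched. */
  _mm512_mask_store_pd(dst, m, v);
}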
(__mmask8)__builtin_ia32_cmpq512_mask((__v8di)__a, (__v8di)__b, 5, + __u); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm512_cmpge_epu64_mask(__m512i __a, __m512i __b) { + return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__a, (__v8di)__b, 5, + (__mmask8)-1); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm512_mask_cmpge_epu64_mask(__mmask8 __u, __m512i __a, __m512i __b) { + return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__a, (__v8di)__b, 5, + __u); +} + +static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__)) +_mm512_cmpgt_epi32_mask(__m512i __a, __m512i __b) { + return (__mmask16)__builtin_ia32_pcmpgtd512_mask((__v16si)__a, (__v16si)__b, + (__mmask16)-1); +} + +static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__)) +_mm512_mask_cmpgt_epi32_mask(__mmask16 __u, __m512i __a, __m512i __b) { + return (__mmask16)__builtin_ia32_pcmpgtd512_mask((__v16si)__a, (__v16si)__b, + __u); +} + +static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__)) +_mm512_cmpgt_epu32_mask(__m512i __a, __m512i __b) { + return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__a, (__v16si)__b, 6, + (__mmask16)-1); +} + +static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__)) +_mm512_mask_cmpgt_epu32_mask(__mmask16 __u, __m512i __a, __m512i __b) { + return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__a, (__v16si)__b, 6, + __u); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm512_mask_cmpgt_epi64_mask(__mmask8 __u, __m512i __a, __m512i __b) { + return (__mmask8)__builtin_ia32_pcmpgtq512_mask((__v8di)__a, (__v8di)__b, + __u); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm512_cmpgt_epi64_mask(__m512i __a, __m512i __b) { + return (__mmask8)__builtin_ia32_pcmpgtq512_mask((__v8di)__a, (__v8di)__b, + (__mmask8)-1); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm512_cmpgt_epu64_mask(__m512i __a, __m512i __b) { + return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__a, (__v8di)__b, 6, + (__mmask8)-1); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm512_mask_cmpgt_epu64_mask(__mmask8 __u, __m512i __a, __m512i __b) { + return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__a, (__v8di)__b, 6, + __u); +} + +static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__)) +_mm512_cmple_epi32_mask(__m512i __a, __m512i __b) { + return (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)__a, (__v16si)__b, 2, + (__mmask16)-1); +} + +static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__)) +_mm512_mask_cmple_epi32_mask(__mmask16 __u, __m512i __a, __m512i __b) { + return (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)__a, (__v16si)__b, 2, + __u); +} + +static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__)) +_mm512_cmple_epu32_mask(__m512i __a, __m512i __b) { + return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__a, (__v16si)__b, 2, + (__mmask16)-1); +} + +static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__)) +_mm512_mask_cmple_epu32_mask(__mmask16 __u, __m512i __a, __m512i __b) { + return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__a, (__v16si)__b, 2, + __u); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm512_cmple_epi64_mask(__m512i __a, __m512i __b) { + return 
(__mmask8)__builtin_ia32_cmpq512_mask((__v8di)__a, (__v8di)__b, 2, + (__mmask8)-1); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm512_mask_cmple_epi64_mask(__mmask8 __u, __m512i __a, __m512i __b) { + return (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)__a, (__v8di)__b, 2, + __u); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm512_cmple_epu64_mask(__m512i __a, __m512i __b) { + return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__a, (__v8di)__b, 2, + (__mmask8)-1); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm512_mask_cmple_epu64_mask(__mmask8 __u, __m512i __a, __m512i __b) { + return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__a, (__v8di)__b, 2, + __u); +} + +static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__)) +_mm512_cmplt_epi32_mask(__m512i __a, __m512i __b) { + return (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)__a, (__v16si)__b, 1, + (__mmask16)-1); +} + +static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__)) +_mm512_mask_cmplt_epi32_mask(__mmask16 __u, __m512i __a, __m512i __b) { + return (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)__a, (__v16si)__b, 1, + __u); +} + +static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__)) +_mm512_cmplt_epu32_mask(__m512i __a, __m512i __b) { + return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__a, (__v16si)__b, 1, + (__mmask16)-1); +} + +static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__)) +_mm512_mask_cmplt_epu32_mask(__mmask16 __u, __m512i __a, __m512i __b) { + return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__a, (__v16si)__b, 1, + __u); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm512_cmplt_epi64_mask(__m512i __a, __m512i __b) { + return (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)__a, (__v8di)__b, 1, + (__mmask8)-1); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm512_mask_cmplt_epi64_mask(__mmask8 __u, __m512i __a, __m512i __b) { + return (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)__a, (__v8di)__b, 1, + __u); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm512_cmplt_epu64_mask(__m512i __a, __m512i __b) { + return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__a, (__v8di)__b, 1, + (__mmask8)-1); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm512_mask_cmplt_epu64_mask(__mmask8 __u, __m512i __a, __m512i __b) { + return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__a, (__v8di)__b, 1, + __u); +} + +static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__)) +_mm512_cmpneq_epi32_mask(__m512i __a, __m512i __b) { + return (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)__a, (__v16si)__b, 4, + (__mmask16)-1); +} + +static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__)) +_mm512_mask_cmpneq_epi32_mask(__mmask16 __u, __m512i __a, __m512i __b) { + return (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)__a, (__v16si)__b, 4, + __u); +} + +static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__)) +_mm512_cmpneq_epu32_mask(__m512i __a, __m512i __b) { + return (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__a, (__v16si)__b, 4, + (__mmask16)-1); +} + +static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__)) +_mm512_mask_cmpneq_epu32_mask(__mmask16 __u, __m512i __a, __m512i __b) { + return 
(__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__a, (__v16si)__b, 4, + __u); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm512_cmpneq_epi64_mask(__m512i __a, __m512i __b) { + return (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)__a, (__v8di)__b, 4, + (__mmask8)-1); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm512_mask_cmpneq_epi64_mask(__mmask8 __u, __m512i __a, __m512i __b) { + return (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)__a, (__v8di)__b, 4, + __u); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm512_cmpneq_epu64_mask(__m512i __a, __m512i __b) { + return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__a, (__v8di)__b, 4, + (__mmask8)-1); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm512_mask_cmpneq_epu64_mask(__mmask8 __u, __m512i __a, __m512i __b) { + return (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__a, (__v8di)__b, 4, + __u); +} + +#define _mm512_cmp_epi32_mask(a, b, p) __extension__ ({ \ + __m512i __a = (a); \ + __m512i __b = (b); \ + (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)__a, (__v16si)__b, (p), \ + (__mmask16)-1); }) + +#define _mm512_cmp_epu32_mask(a, b, p) __extension__ ({ \ + __m512i __a = (a); \ + __m512i __b = (b); \ + (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__a, (__v16si)__b, (p), \ + (__mmask16)-1); }) + +#define _mm512_cmp_epi64_mask(a, b, p) __extension__ ({ \ + __m512i __a = (a); \ + __m512i __b = (b); \ + (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)__a, (__v8di)__b, (p), \ + (__mmask8)-1); }) + +#define _mm512_cmp_epu64_mask(a, b, p) __extension__ ({ \ + __m512i __a = (a); \ + __m512i __b = (b); \ + (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__a, (__v8di)__b, (p), \ + (__mmask8)-1); }) + +#define _mm512_mask_cmp_epi32_mask(m, a, b, p) __extension__ ({ \ + __m512i __a = (a); \ + __m512i __b = (b); \ + (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)__a, (__v16si)__b, (p), \ + (__mmask16)(m)); }) + +#define _mm512_mask_cmp_epu32_mask(m, a, b, p) __extension__ ({ \ + __m512i __a = (a); \ + __m512i __b = (b); \ + (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)__a, (__v16si)__b, (p), \ + (__mmask16)(m)); }) + +#define _mm512_mask_cmp_epi64_mask(m, a, b, p) __extension__ ({ \ + __m512i __a = (a); \ + __m512i __b = (b); \ + (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)__a, (__v8di)__b, (p), \ + (__mmask8)(m)); }) + +#define _mm512_mask_cmp_epu64_mask(m, a, b, p) __extension__ ({ \ + __m512i __a = (a); \ + __m512i __b = (b); \ + (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)__a, (__v8di)__b, (p), \ + (__mmask8)(m)); }) #endif // __AVX512FINTRIN_H diff --git a/contrib/llvm/tools/clang/lib/Headers/avx512vlbwintrin.h b/contrib/llvm/tools/clang/lib/Headers/avx512vlbwintrin.h index 11333f8..c3b087e 100644 --- a/contrib/llvm/tools/clang/lib/Headers/avx512vlbwintrin.h +++ b/contrib/llvm/tools/clang/lib/Headers/avx512vlbwintrin.h @@ -42,6 +42,17 @@ _mm_mask_cmpeq_epi8_mask(__mmask16 __u, __m128i __a, __m128i __b) { __u); } +static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__)) +_mm_cmpeq_epu8_mask(__m128i __a, __m128i __b) { + return (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)__a, (__v16qi)__b, 0, + (__mmask16)-1); +} + +static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__)) +_mm_mask_cmpeq_epu8_mask(__mmask16 __u, __m128i __a, __m128i __b) { + return (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)__a, (__v16qi)__b, 0, + __u); +} static 
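The named integer compares above are fixed-predicate instances of these _mm512_cmp_*_mask macros, with 0 = eq, 1 = lt, 2 = le, 4 = ne, 5 = ge, 6 = gt; the two calls below should therefore produce the same mask (assuming -mavx512f):

#include <immintrin.h>

static __mmask16 lt_two_ways(__m512i a, __m512i b)
{
  __mmask16 named   = _mm512_cmplt_epu32_mask(a, b);
  __mmask16 generic = _mm512_cmp_epu32_mask(a, b, 1);  /* predicate 1 == lt */
  return named & generic;  /* identical by construction */
}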
__inline__ __mmask32 __attribute__((__always_inline__, __nodebug__)) _mm256_cmpeq_epi8_mask(__m256i __a, __m256i __b) { @@ -55,6 +66,18 @@ _mm256_mask_cmpeq_epi8_mask(__mmask32 __u, __m256i __a, __m256i __b) { __u); } +static __inline__ __mmask32 __attribute__((__always_inline__, __nodebug__)) +_mm256_cmpeq_epu8_mask(__m256i __a, __m256i __b) { + return (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)__a, (__v32qi)__b, 0, + (__mmask32)-1); +} + +static __inline__ __mmask32 __attribute__((__always_inline__, __nodebug__)) +_mm256_mask_cmpeq_epu8_mask(__mmask32 __u, __m256i __a, __m256i __b) { + return (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)__a, (__v32qi)__b, 0, + __u); +} + static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) _mm_cmpeq_epi16_mask(__m128i __a, __m128i __b) { return (__mmask8)__builtin_ia32_pcmpeqw128_mask((__v8hi)__a, (__v8hi)__b, @@ -67,6 +90,17 @@ _mm_mask_cmpeq_epi16_mask(__mmask8 __u, __m128i __a, __m128i __b) { __u); } +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm_cmpeq_epu16_mask(__m128i __a, __m128i __b) { + return (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)__a, (__v8hi)__b, 0, + (__mmask8)-1); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm_mask_cmpeq_epu16_mask(__mmask8 __u, __m128i __a, __m128i __b) { + return (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)__a, (__v8hi)__b, 0, + __u); +} static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__)) _mm256_cmpeq_epi16_mask(__m256i __a, __m256i __b) { @@ -80,4 +114,744 @@ _mm256_mask_cmpeq_epi16_mask(__mmask16 __u, __m256i __a, __m256i __b) { __u); } +static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__)) +_mm256_cmpeq_epu16_mask(__m256i __a, __m256i __b) { + return (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)__a, (__v16hi)__b, 0, + (__mmask16)-1); +} + +static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__)) +_mm256_mask_cmpeq_epu16_mask(__mmask16 __u, __m256i __a, __m256i __b) { + return (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)__a, (__v16hi)__b, 0, + __u); +} + +static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__)) +_mm_cmpge_epi8_mask(__m128i __a, __m128i __b) { + return (__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)__a, (__v16qi)__b, 5, + (__mmask16)-1); +} + +static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__)) +_mm_mask_cmpge_epi8_mask(__mmask16 __u, __m128i __a, __m128i __b) { + return (__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)__a, (__v16qi)__b, 5, + __u); +} + +static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__)) +_mm_cmpge_epu8_mask(__m128i __a, __m128i __b) { + return (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)__a, (__v16qi)__b, 5, + (__mmask16)-1); +} + +static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__)) +_mm_mask_cmpge_epu8_mask(__mmask16 __u, __m128i __a, __m128i __b) { + return (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)__a, (__v16qi)__b, 5, + __u); +} + +static __inline__ __mmask32 __attribute__((__always_inline__, __nodebug__)) +_mm256_cmpge_epi8_mask(__m256i __a, __m256i __b) { + return (__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)__a, (__v32qi)__b, 5, + (__mmask32)-1); +} + +static __inline__ __mmask32 __attribute__((__always_inline__, __nodebug__)) +_mm256_mask_cmpge_epi8_mask(__mmask32 __u, __m256i __a, __m256i __b) { + return (__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)__a, (__v32qi)__b, 5, + __u); +} + 
+static __inline__ __mmask32 __attribute__((__always_inline__, __nodebug__)) +_mm256_cmpge_epu8_mask(__m256i __a, __m256i __b) { + return (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)__a, (__v32qi)__b, 5, + (__mmask32)-1); +} + +static __inline__ __mmask32 __attribute__((__always_inline__, __nodebug__)) +_mm256_mask_cmpge_epu8_mask(__mmask32 __u, __m256i __a, __m256i __b) { + return (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)__a, (__v32qi)__b, 5, + __u); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm_cmpge_epi16_mask(__m128i __a, __m128i __b) { + return (__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)__a, (__v8hi)__b, 5, + (__mmask8)-1); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm_mask_cmpge_epi16_mask(__mmask8 __u, __m128i __a, __m128i __b) { + return (__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)__a, (__v8hi)__b, 5, + __u); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm_cmpge_epu16_mask(__m128i __a, __m128i __b) { + return (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)__a, (__v8hi)__b, 5, + (__mmask8)-1); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm_mask_cmpge_epu16_mask(__mmask8 __u, __m128i __a, __m128i __b) { + return (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)__a, (__v8hi)__b, 5, + __u); +} + +static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__)) +_mm256_cmpge_epi16_mask(__m256i __a, __m256i __b) { + return (__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)__a, (__v16hi)__b, 5, + (__mmask16)-1); +} + +static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__)) +_mm256_mask_cmpge_epi16_mask(__mmask16 __u, __m256i __a, __m256i __b) { + return (__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)__a, (__v16hi)__b, 5, + __u); +} + +static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__)) +_mm256_cmpge_epu16_mask(__m256i __a, __m256i __b) { + return (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)__a, (__v16hi)__b, 5, + (__mmask16)-1); +} + +static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__)) +_mm256_mask_cmpge_epu16_mask(__mmask16 __u, __m256i __a, __m256i __b) { + return (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)__a, (__v16hi)__b, 5, + __u); +} + +static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__)) +_mm_cmpgt_epi8_mask(__m128i __a, __m128i __b) { + return (__mmask16)__builtin_ia32_pcmpgtb128_mask((__v16qi)__a, (__v16qi)__b, + (__mmask16)-1); +} + +static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__)) +_mm_mask_cmpgt_epi8_mask(__mmask16 __u, __m128i __a, __m128i __b) { + return (__mmask16)__builtin_ia32_pcmpgtb128_mask((__v16qi)__a, (__v16qi)__b, + __u); +} + +static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__)) +_mm_cmpgt_epu8_mask(__m128i __a, __m128i __b) { + return (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)__a, (__v16qi)__b, 6, + (__mmask16)-1); +} + +static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__)) +_mm_mask_cmpgt_epu8_mask(__mmask16 __u, __m128i __a, __m128i __b) { + return (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)__a, (__v16qi)__b, 6, + __u); +} + +static __inline__ __mmask32 __attribute__((__always_inline__, __nodebug__)) +_mm256_cmpgt_epi8_mask(__m256i __a, __m256i __b) { + return (__mmask32)__builtin_ia32_pcmpgtb256_mask((__v32qi)__a, (__v32qi)__b, + (__mmask32)-1); +} + +static __inline__ __mmask32 
__attribute__((__always_inline__, __nodebug__)) +_mm256_mask_cmpgt_epi8_mask(__mmask32 __u, __m256i __a, __m256i __b) { + return (__mmask32)__builtin_ia32_pcmpgtb256_mask((__v32qi)__a, (__v32qi)__b, + __u); +} + +static __inline__ __mmask32 __attribute__((__always_inline__, __nodebug__)) +_mm256_cmpgt_epu8_mask(__m256i __a, __m256i __b) { + return (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)__a, (__v32qi)__b, 6, + (__mmask32)-1); +} + +static __inline__ __mmask32 __attribute__((__always_inline__, __nodebug__)) +_mm256_mask_cmpgt_epu8_mask(__mmask32 __u, __m256i __a, __m256i __b) { + return (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)__a, (__v32qi)__b, 6, + __u); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm_cmpgt_epi16_mask(__m128i __a, __m128i __b) { + return (__mmask8)__builtin_ia32_pcmpgtw128_mask((__v8hi)__a, (__v8hi)__b, + (__mmask8)-1); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm_mask_cmpgt_epi16_mask(__mmask8 __u, __m128i __a, __m128i __b) { + return (__mmask8)__builtin_ia32_pcmpgtw128_mask((__v8hi)__a, (__v8hi)__b, + __u); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm_cmpgt_epu16_mask(__m128i __a, __m128i __b) { + return (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)__a, (__v8hi)__b, 6, + (__mmask8)-1); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm_mask_cmpgt_epu16_mask(__mmask8 __u, __m128i __a, __m128i __b) { + return (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)__a, (__v8hi)__b, 6, + __u); +} + +static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__)) +_mm256_cmpgt_epi16_mask(__m256i __a, __m256i __b) { + return (__mmask16)__builtin_ia32_pcmpgtw256_mask((__v16hi)__a, (__v16hi)__b, + (__mmask16)-1); +} + +static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__)) +_mm256_mask_cmpgt_epi16_mask(__mmask16 __u, __m256i __a, __m256i __b) { + return (__mmask16)__builtin_ia32_pcmpgtw256_mask((__v16hi)__a, (__v16hi)__b, + __u); +} + +static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__)) +_mm256_cmpgt_epu16_mask(__m256i __a, __m256i __b) { + return (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)__a, (__v16hi)__b, 6, + (__mmask16)-1); +} + +static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__)) +_mm256_mask_cmpgt_epu16_mask(__mmask16 __u, __m256i __a, __m256i __b) { + return (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)__a, (__v16hi)__b, 6, + __u); +} + +static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__)) +_mm_cmple_epi8_mask(__m128i __a, __m128i __b) { + return (__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)__a, (__v16qi)__b, 2, + (__mmask16)-1); +} + +static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__)) +_mm_mask_cmple_epi8_mask(__mmask16 __u, __m128i __a, __m128i __b) { + return (__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)__a, (__v16qi)__b, 2, + __u); +} + +static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__)) +_mm_cmple_epu8_mask(__m128i __a, __m128i __b) { + return (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)__a, (__v16qi)__b, 2, + (__mmask16)-1); +} + +static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__)) +_mm_mask_cmple_epu8_mask(__mmask16 __u, __m128i __a, __m128i __b) { + return (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)__a, (__v16qi)__b, 2, + __u); +} + +static __inline__ __mmask32 __attribute__((__always_inline__, 
__nodebug__)) +_mm256_cmple_epi8_mask(__m256i __a, __m256i __b) { + return (__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)__a, (__v32qi)__b, 2, + (__mmask32)-1); +} + +static __inline__ __mmask32 __attribute__((__always_inline__, __nodebug__)) +_mm256_mask_cmple_epi8_mask(__mmask32 __u, __m256i __a, __m256i __b) { + return (__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)__a, (__v32qi)__b, 2, + __u); +} + +static __inline__ __mmask32 __attribute__((__always_inline__, __nodebug__)) +_mm256_cmple_epu8_mask(__m256i __a, __m256i __b) { + return (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)__a, (__v32qi)__b, 2, + (__mmask32)-1); +} + +static __inline__ __mmask32 __attribute__((__always_inline__, __nodebug__)) +_mm256_mask_cmple_epu8_mask(__mmask32 __u, __m256i __a, __m256i __b) { + return (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)__a, (__v32qi)__b, 2, + __u); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm_cmple_epi16_mask(__m128i __a, __m128i __b) { + return (__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)__a, (__v8hi)__b, 2, + (__mmask8)-1); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm_mask_cmple_epi16_mask(__mmask8 __u, __m128i __a, __m128i __b) { + return (__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)__a, (__v8hi)__b, 2, + __u); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm_cmple_epu16_mask(__m128i __a, __m128i __b) { + return (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)__a, (__v8hi)__b, 2, + (__mmask8)-1); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm_mask_cmple_epu16_mask(__mmask8 __u, __m128i __a, __m128i __b) { + return (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)__a, (__v8hi)__b, 2, + __u); +} + +static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__)) +_mm256_cmple_epi16_mask(__m256i __a, __m256i __b) { + return (__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)__a, (__v16hi)__b, 2, + (__mmask16)-1); +} + +static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__)) +_mm256_mask_cmple_epi16_mask(__mmask16 __u, __m256i __a, __m256i __b) { + return (__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)__a, (__v16hi)__b, 2, + __u); +} + +static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__)) +_mm256_cmple_epu16_mask(__m256i __a, __m256i __b) { + return (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)__a, (__v16hi)__b, 2, + (__mmask16)-1); +} + +static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__)) +_mm256_mask_cmple_epu16_mask(__mmask16 __u, __m256i __a, __m256i __b) { + return (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)__a, (__v16hi)__b, 2, + __u); +} + +static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__)) +_mm_cmplt_epi8_mask(__m128i __a, __m128i __b) { + return (__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)__a, (__v16qi)__b, 1, + (__mmask16)-1); +} + +static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__)) +_mm_mask_cmplt_epi8_mask(__mmask16 __u, __m128i __a, __m128i __b) { + return (__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)__a, (__v16qi)__b, 1, + __u); +} + +static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__)) +_mm_cmplt_epu8_mask(__m128i __a, __m128i __b) { + return (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)__a, (__v16qi)__b, 1, + (__mmask16)-1); +} + +static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__)) 
+_mm_mask_cmplt_epu8_mask(__mmask16 __u, __m128i __a, __m128i __b) { + return (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)__a, (__v16qi)__b, 1, + __u); +} + +static __inline__ __mmask32 __attribute__((__always_inline__, __nodebug__)) +_mm256_cmplt_epi8_mask(__m256i __a, __m256i __b) { + return (__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)__a, (__v32qi)__b, 1, + (__mmask32)-1); +} + +static __inline__ __mmask32 __attribute__((__always_inline__, __nodebug__)) +_mm256_mask_cmplt_epi8_mask(__mmask32 __u, __m256i __a, __m256i __b) { + return (__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)__a, (__v32qi)__b, 1, + __u); +} + +static __inline__ __mmask32 __attribute__((__always_inline__, __nodebug__)) +_mm256_cmplt_epu8_mask(__m256i __a, __m256i __b) { + return (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)__a, (__v32qi)__b, 1, + (__mmask32)-1); +} + +static __inline__ __mmask32 __attribute__((__always_inline__, __nodebug__)) +_mm256_mask_cmplt_epu8_mask(__mmask32 __u, __m256i __a, __m256i __b) { + return (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)__a, (__v32qi)__b, 1, + __u); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm_cmplt_epi16_mask(__m128i __a, __m128i __b) { + return (__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)__a, (__v8hi)__b, 1, + (__mmask8)-1); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm_mask_cmplt_epi16_mask(__mmask8 __u, __m128i __a, __m128i __b) { + return (__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)__a, (__v8hi)__b, 1, + __u); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm_cmplt_epu16_mask(__m128i __a, __m128i __b) { + return (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)__a, (__v8hi)__b, 1, + (__mmask8)-1); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm_mask_cmplt_epu16_mask(__mmask8 __u, __m128i __a, __m128i __b) { + return (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)__a, (__v8hi)__b, 1, + __u); +} + +static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__)) +_mm256_cmplt_epi16_mask(__m256i __a, __m256i __b) { + return (__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)__a, (__v16hi)__b, 1, + (__mmask16)-1); +} + +static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__)) +_mm256_mask_cmplt_epi16_mask(__mmask16 __u, __m256i __a, __m256i __b) { + return (__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)__a, (__v16hi)__b, 1, + __u); +} + +static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__)) +_mm256_cmplt_epu16_mask(__m256i __a, __m256i __b) { + return (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)__a, (__v16hi)__b, 1, + (__mmask16)-1); +} + +static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__)) +_mm256_mask_cmplt_epu16_mask(__mmask16 __u, __m256i __a, __m256i __b) { + return (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)__a, (__v16hi)__b, 1, + __u); +} + +static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__)) +_mm_cmpneq_epi8_mask(__m128i __a, __m128i __b) { + return (__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)__a, (__v16qi)__b, 4, + (__mmask16)-1); +} + +static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__)) +_mm_mask_cmpneq_epi8_mask(__mmask16 __u, __m128i __a, __m128i __b) { + return (__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)__a, (__v16qi)__b, 4, + __u); +} + +static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__)) +_mm_cmpneq_epu8_mask(__m128i 
__a, __m128i __b) { + return (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)__a, (__v16qi)__b, 4, + (__mmask16)-1); +} + +static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__)) +_mm_mask_cmpneq_epu8_mask(__mmask16 __u, __m128i __a, __m128i __b) { + return (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)__a, (__v16qi)__b, 4, + __u); +} + +static __inline__ __mmask32 __attribute__((__always_inline__, __nodebug__)) +_mm256_cmpneq_epi8_mask(__m256i __a, __m256i __b) { + return (__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)__a, (__v32qi)__b, 4, + (__mmask32)-1); +} + +static __inline__ __mmask32 __attribute__((__always_inline__, __nodebug__)) +_mm256_mask_cmpneq_epi8_mask(__mmask32 __u, __m256i __a, __m256i __b) { + return (__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)__a, (__v32qi)__b, 4, + __u); +} + +static __inline__ __mmask32 __attribute__((__always_inline__, __nodebug__)) +_mm256_cmpneq_epu8_mask(__m256i __a, __m256i __b) { + return (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)__a, (__v32qi)__b, 4, + (__mmask32)-1); +} + +static __inline__ __mmask32 __attribute__((__always_inline__, __nodebug__)) +_mm256_mask_cmpneq_epu8_mask(__mmask32 __u, __m256i __a, __m256i __b) { + return (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)__a, (__v32qi)__b, 4, + __u); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm_cmpneq_epi16_mask(__m128i __a, __m128i __b) { + return (__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)__a, (__v8hi)__b, 4, + (__mmask8)-1); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm_mask_cmpneq_epi16_mask(__mmask8 __u, __m128i __a, __m128i __b) { + return (__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)__a, (__v8hi)__b, 4, + __u); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm_cmpneq_epu16_mask(__m128i __a, __m128i __b) { + return (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)__a, (__v8hi)__b, 4, + (__mmask8)-1); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm_mask_cmpneq_epu16_mask(__mmask8 __u, __m128i __a, __m128i __b) { + return (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)__a, (__v8hi)__b, 4, + __u); +} + +static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__)) +_mm256_cmpneq_epi16_mask(__m256i __a, __m256i __b) { + return (__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)__a, (__v16hi)__b, 4, + (__mmask16)-1); +} + +static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__)) +_mm256_mask_cmpneq_epi16_mask(__mmask16 __u, __m256i __a, __m256i __b) { + return (__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)__a, (__v16hi)__b, 4, + __u); +} + +static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__)) +_mm256_cmpneq_epu16_mask(__m256i __a, __m256i __b) { + return (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)__a, (__v16hi)__b, 4, + (__mmask16)-1); +} + +static __inline__ __mmask16 __attribute__((__always_inline__, __nodebug__)) +_mm256_mask_cmpneq_epu16_mask(__mmask16 __u, __m256i __a, __m256i __b) { + return (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)__a, (__v16hi)__b, 4, + __u); +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_mask_add_epi8 (__m256i __W, __mmask32 __U, __m256i __A, __m256i __B){ + return (__m256i) __builtin_ia32_paddb256_mask ((__v32qi) __A, + (__v32qi) __B, + (__v32qi) __W, + (__mmask32) __U); +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) 
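The epu8/epu16 variants compare lanes as unsigned values, unlike their epi8/epi16 counterparts; a sketch (assuming -mavx512vl -mavx512bw):

#include <immintrin.h>

static void gt_both_ways(__m128i a, __m128i b, __mmask16 *s, __mmask16 *u)
{
  *s = _mm_cmpgt_epi8_mask(a, b);  /* signed: 0x80 (-128) < 0x7f (127) */
  *u = _mm_cmpgt_epu8_mask(a, b);  /* unsigned: 0x80 (128) > 0x7f (127) */
}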
+_mm256_maskz_add_epi8 (__mmask32 __U, __m256i __A, __m256i __B) { + return (__m256i) __builtin_ia32_paddb256_mask ((__v32qi) __A, + (__v32qi) __B, + (__v32qi) + _mm256_setzero_si256 (), + (__mmask32) __U); +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_mask_add_epi16 (__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) { + return (__m256i) __builtin_ia32_paddw256_mask ((__v16hi) __A, + (__v16hi) __B, + (__v16hi) __W, + (__mmask16) __U); +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_maskz_add_epi16 (__mmask16 __U, __m256i __A, __m256i __B) { + return (__m256i) __builtin_ia32_paddw256_mask ((__v16hi) __A, + (__v16hi) __B, + (__v16hi) + _mm256_setzero_si256 (), + (__mmask16) __U); +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_mask_sub_epi8 (__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) { + return (__m256i) __builtin_ia32_psubb256_mask ((__v32qi) __A, + (__v32qi) __B, + (__v32qi) __W, + (__mmask32) __U); +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_maskz_sub_epi8 (__mmask32 __U, __m256i __A, __m256i __B) { + return (__m256i) __builtin_ia32_psubb256_mask ((__v32qi) __A, + (__v32qi) __B, + (__v32qi) + _mm256_setzero_si256 (), + (__mmask32) __U); +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_mask_sub_epi16 (__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) { + return (__m256i) __builtin_ia32_psubw256_mask ((__v16hi) __A, + (__v16hi) __B, + (__v16hi) __W, + (__mmask16) __U); +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_maskz_sub_epi16 (__mmask16 __U, __m256i __A, __m256i __B) { + return (__m256i) __builtin_ia32_psubw256_mask ((__v16hi) __A, + (__v16hi) __B, + (__v16hi) + _mm256_setzero_si256 (), + (__mmask16) __U); +} +static __inline__ __m128i __attribute__((__always_inline__, __nodebug__)) +_mm_mask_add_epi8 (__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) { + return (__m128i) __builtin_ia32_paddb128_mask ((__v16qi) __A, + (__v16qi) __B, + (__v16qi) __W, + (__mmask16) __U); +} + +static __inline__ __m128i __attribute__((__always_inline__, __nodebug__)) +_mm_maskz_add_epi8 (__mmask16 __U, __m128i __A, __m128i __B) { + return (__m128i) __builtin_ia32_paddb128_mask ((__v16qi) __A, + (__v16qi) __B, + (__v16qi) + _mm_setzero_si128 (), + (__mmask16) __U); +} + +static __inline__ __m128i __attribute__((__always_inline__, __nodebug__)) +_mm_mask_add_epi16 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { + return (__m128i) __builtin_ia32_paddw128_mask ((__v8hi) __A, + (__v8hi) __B, + (__v8hi) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __attribute__((__always_inline__, __nodebug__)) +_mm_maskz_add_epi16 (__mmask8 __U, __m128i __A, __m128i __B) { + return (__m128i) __builtin_ia32_paddw128_mask ((__v8hi) __A, + (__v8hi) __B, + (__v8hi) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +static __inline__ __m128i __attribute__((__always_inline__, __nodebug__)) +_mm_mask_sub_epi8 (__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) { + return (__m128i) __builtin_ia32_psubb128_mask ((__v16qi) __A, + (__v16qi) __B, + (__v16qi) __W, + (__mmask16) __U); +} + +static __inline__ __m128i __attribute__((__always_inline__, __nodebug__)) +_mm_maskz_sub_epi8 (__mmask16 __U, __m128i __A, __m128i __B) { + return (__m128i) __builtin_ia32_psubb128_mask ((__v16qi) __A, + (__v16qi) __B, + (__v16qi) + _mm_setzero_si128 (), + (__mmask16) 
__U); +} + +static __inline__ __m128i __attribute__((__always_inline__, __nodebug__)) +_mm_mask_sub_epi16 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { + return (__m128i) __builtin_ia32_psubw128_mask ((__v8hi) __A, + (__v8hi) __B, + (__v8hi) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __attribute__((__always_inline__, __nodebug__)) +_mm_maskz_sub_epi16 (__mmask8 __U, __m128i __A, __m128i __B) { + return (__m128i) __builtin_ia32_psubw128_mask ((__v8hi) __A, + (__v8hi) __B, + (__v8hi) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +static __inline__ __m256i __attribute__ ((__always_inline__, __nodebug__)) +_mm256_mask_mullo_epi16 (__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) { + return (__m256i) __builtin_ia32_pmullw256_mask ((__v16hi) __A, + (__v16hi) __B, + (__v16hi) __W, + (__mmask16) __U); +} + +static __inline__ __m256i __attribute__ ((__always_inline__, __nodebug__)) +_mm256_maskz_mullo_epi16 (__mmask16 __U, __m256i __A, __m256i __B) { + return (__m256i) __builtin_ia32_pmullw256_mask ((__v16hi) __A, + (__v16hi) __B, + (__v16hi) + _mm256_setzero_si256 (), + (__mmask16) __U); +} + +static __inline__ __m128i __attribute__ ((__always_inline__, __nodebug__)) +_mm_mask_mullo_epi16 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { + return (__m128i) __builtin_ia32_pmullw128_mask ((__v8hi) __A, + (__v8hi) __B, + (__v8hi) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __attribute__ ((__always_inline__, __nodebug__)) +_mm_maskz_mullo_epi16 (__mmask8 __U, __m128i __A, __m128i __B) { + return (__m128i) __builtin_ia32_pmullw128_mask ((__v8hi) __A, + (__v8hi) __B, + (__v8hi) + _mm_setzero_si128 (), + (__mmask8) __U); +} +#define _mm_cmp_epi8_mask(a, b, p) __extension__ ({ \ + (__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)(__m128i)(a), \ + (__v16qi)(__m128i)(b), \ + (p), (__mmask16)-1); }) + +#define _mm_mask_cmp_epi8_mask(m, a, b, p) __extension__ ({ \ + (__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)(__m128i)(a), \ + (__v16qi)(__m128i)(b), \ + (p), (__mmask16)(m)); }) + +#define _mm_cmp_epu8_mask(a, b, p) __extension__ ({ \ + (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)(__m128i)(a), \ + (__v16qi)(__m128i)(b), \ + (p), (__mmask16)-1); }) + +#define _mm_mask_cmp_epu8_mask(m, a, b, p) __extension__ ({ \ + (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)(__m128i)(a), \ + (__v16qi)(__m128i)(b), \ + (p), (__mmask16)(m)); }) + +#define _mm256_cmp_epi8_mask(a, b, p) __extension__ ({ \ + (__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)(__m256i)(a), \ + (__v32qi)(__m256i)(b), \ + (p), (__mmask32)-1); }) + +#define _mm256_mask_cmp_epi8_mask(m, a, b, p) __extension__ ({ \ + (__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)(__m256i)(a), \ + (__v32qi)(__m256i)(b), \ + (p), (__mmask32)(m)); }) + +#define _mm256_cmp_epu8_mask(a, b, p) __extension__ ({ \ + (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)(__m256i)(a), \ + (__v32qi)(__m256i)(b), \ + (p), (__mmask32)-1); }) + +#define _mm256_mask_cmp_epu8_mask(m, a, b, p) __extension__ ({ \ + (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)(__m256i)(a), \ + (__v32qi)(__m256i)(b), \ + (p), (__mmask32)(m)); }) + +#define _mm_cmp_epi16_mask(a, b, p) __extension__ ({ \ + (__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)(__m128i)(a), \ + (__v8hi)(__m128i)(b), \ + (p), (__mmask8)-1); }) + +#define _mm_mask_cmp_epi16_mask(m, a, b, p) __extension__ ({ \ + (__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)(__m128i)(a), \ + (__v8hi)(__m128i)(b), \ + (p), (__mmask8)(m)); }) + +#define _mm_cmp_epu16_mask(a, b, p) __extension__ ({ 
\ + (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)(__m128i)(a), \ + (__v8hi)(__m128i)(b), \ + (p), (__mmask8)-1); }) + +#define _mm_mask_cmp_epu16_mask(m, a, b, p) __extension__ ({ \ + (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)(__m128i)(a), \ + (__v8hi)(__m128i)(b), \ + (p), (__mmask8)(m)); }) + +#define _mm256_cmp_epi16_mask(a, b, p) __extension__ ({ \ + (__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)(__m256i)(a), \ + (__v16hi)(__m256i)(b), \ + (p), (__mmask16)-1); }) + +#define _mm256_mask_cmp_epi16_mask(m, a, b, p) __extension__ ({ \ + (__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)(__m256i)(a), \ + (__v16hi)(__m256i)(b), \ + (p), (__mmask16)(m)); }) + +#define _mm256_cmp_epu16_mask(a, b, p) __extension__ ({ \ + (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)(__m256i)(a), \ + (__v16hi)(__m256i)(b), \ + (p), (__mmask16)-1); }) + +#define _mm256_mask_cmp_epu16_mask(m, a, b, p) __extension__ ({ \ + (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)(__m256i)(a), \ + (__v16hi)(__m256i)(b), \ + (p), (__mmask16)(m)); }) + #endif /* __AVX512VLBWINTRIN_H */ diff --git a/contrib/llvm/tools/clang/lib/Headers/avx512vldqintrin.h b/contrib/llvm/tools/clang/lib/Headers/avx512vldqintrin.h new file mode 100644 index 0000000..4024446 --- /dev/null +++ b/contrib/llvm/tools/clang/lib/Headers/avx512vldqintrin.h @@ -0,0 +1,349 @@ +/*===---- avx512vldqintrin.h - AVX512VL and AVX512DQ intrinsics ---------------------------=== + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __IMMINTRIN_H +#error "Never use <avx512vldqintrin.h> directly; include <immintrin.h> instead." 
+#endif + +#ifndef __AVX512VLDQINTRIN_H +#define __AVX512VLDQINTRIN_H + + +static __inline__ __m256i __attribute__ ((__always_inline__, __nodebug__)) +_mm256_mullo_epi64 (__m256i __A, __m256i __B) { + return (__m256i) ((__v4di) __A * (__v4di) __B); +} + +static __inline__ __m256i __attribute__ ((__always_inline__, __nodebug__)) +_mm256_mask_mullo_epi64 (__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) { + return (__m256i) __builtin_ia32_pmullq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __attribute__ ((__always_inline__, __nodebug__)) +_mm256_maskz_mullo_epi64 (__mmask8 __U, __m256i __A, __m256i __B) { + return (__m256i) __builtin_ia32_pmullq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +static __inline__ __m128i __attribute__ ((__always_inline__, __nodebug__)) +_mm_mullo_epi64 (__m128i __A, __m128i __B) { + return (__m128i) ((__v2di) __A * (__v2di) __B); +} + +static __inline__ __m128i __attribute__ ((__always_inline__, __nodebug__)) +_mm_mask_mullo_epi64 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) { + return (__m128i) __builtin_ia32_pmullq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __attribute__ ((__always_inline__, __nodebug__)) +_mm_maskz_mullo_epi64 (__mmask8 __U, __m128i __A, __m128i __B) { + return (__m128i) __builtin_ia32_pmullq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +static __inline__ __m256d __attribute__ ((__always_inline__, __nodebug__)) +_mm256_mask_andnot_pd (__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d) __builtin_ia32_andnpd256_mask ((__v4df) __A, + (__v4df) __B, + (__v4df) __W, + (__mmask8) __U); +} + +static __inline__ __m256d __attribute__ ((__always_inline__, __nodebug__)) +_mm256_maskz_andnot_pd (__mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d) __builtin_ia32_andnpd256_mask ((__v4df) __A, + (__v4df) __B, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m128d __attribute__ ((__always_inline__, __nodebug__)) +_mm_mask_andnot_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d) __builtin_ia32_andnpd128_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) __W, + (__mmask8) __U); +} + +static __inline__ __m128d __attribute__ ((__always_inline__, __nodebug__)) +_mm_maskz_andnot_pd (__mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d) __builtin_ia32_andnpd128_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m256 __attribute__ ((__always_inline__, __nodebug__)) +_mm256_mask_andnot_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) { + return (__m256) __builtin_ia32_andnps256_mask ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) __W, + (__mmask8) __U); +} + +static __inline__ __m256 __attribute__ ((__always_inline__, __nodebug__)) +_mm256_maskz_andnot_ps (__mmask8 __U, __m256 __A, __m256 __B) { + return (__m256) __builtin_ia32_andnps256_mask ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +static __inline__ __m128 __attribute__ ((__always_inline__, __nodebug__)) +_mm_mask_andnot_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { + return (__m128) __builtin_ia32_andnps128_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __W, + (__mmask8) __U); +} + +static __inline__ __m128 __attribute__ ((__always_inline__, __nodebug__)) 
+_mm_maskz_andnot_ps (__mmask8 __U, __m128 __A, __m128 __B) { + return (__m128) __builtin_ia32_andnps128_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +static __inline__ __m256d __attribute__ ((__always_inline__, __nodebug__)) +_mm256_mask_and_pd (__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d) __builtin_ia32_andpd256_mask ((__v4df) __A, + (__v4df) __B, + (__v4df) __W, + (__mmask8) __U); +} + +static __inline__ __m256d __attribute__ ((__always_inline__, __nodebug__)) +_mm256_maskz_and_pd (__mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d) __builtin_ia32_andpd256_mask ((__v4df) __A, + (__v4df) __B, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m128d __attribute__ ((__always_inline__, __nodebug__)) +_mm_mask_and_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d) __builtin_ia32_andpd128_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) __W, + (__mmask8) __U); +} + +static __inline__ __m128d __attribute__ ((__always_inline__, __nodebug__)) +_mm_maskz_and_pd (__mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d) __builtin_ia32_andpd128_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m256 __attribute__ ((__always_inline__, __nodebug__)) +_mm256_mask_and_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) { + return (__m256) __builtin_ia32_andps256_mask ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) __W, + (__mmask8) __U); +} + +static __inline__ __m256 __attribute__ ((__always_inline__, __nodebug__)) +_mm256_maskz_and_ps (__mmask8 __U, __m256 __A, __m256 __B) { + return (__m256) __builtin_ia32_andps256_mask ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +static __inline__ __m128 __attribute__ ((__always_inline__, __nodebug__)) +_mm_mask_and_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { + return (__m128) __builtin_ia32_andps128_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __W, + (__mmask8) __U); +} + +static __inline__ __m128 __attribute__ ((__always_inline__, __nodebug__)) +_mm_maskz_and_ps (__mmask8 __U, __m128 __A, __m128 __B) { + return (__m128) __builtin_ia32_andps128_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +static __inline__ __m256d __attribute__ ((__always_inline__, __nodebug__)) +_mm256_mask_xor_pd (__m256d __W, __mmask8 __U, __m256d __A, + __m256d __B) { + return (__m256d) __builtin_ia32_xorpd256_mask ((__v4df) __A, + (__v4df) __B, + (__v4df) __W, + (__mmask8) __U); +} + +static __inline__ __m256d __attribute__ ((__always_inline__, __nodebug__)) +_mm256_maskz_xor_pd (__mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d) __builtin_ia32_xorpd256_mask ((__v4df) __A, + (__v4df) __B, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m128d __attribute__ ((__always_inline__, __nodebug__)) +_mm_mask_xor_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d) __builtin_ia32_xorpd128_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) __W, + (__mmask8) __U); +} + +static __inline__ __m128d __attribute__ ((__always_inline__, __nodebug__)) +_mm_maskz_xor_pd (__mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d) __builtin_ia32_xorpd128_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m256 __attribute__ ((__always_inline__, __nodebug__)) +_mm256_mask_xor_ps (__m256 
__W, __mmask8 __U, __m256 __A, __m256 __B) { + return (__m256) __builtin_ia32_xorps256_mask ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) __W, + (__mmask8) __U); +} + +static __inline__ __m256 __attribute__ ((__always_inline__, __nodebug__)) +_mm256_maskz_xor_ps (__mmask8 __U, __m256 __A, __m256 __B) { + return (__m256) __builtin_ia32_xorps256_mask ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +static __inline__ __m128 __attribute__ ((__always_inline__, __nodebug__)) +_mm_mask_xor_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { + return (__m128) __builtin_ia32_xorps128_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __W, + (__mmask8) __U); +} + +static __inline__ __m128 __attribute__ ((__always_inline__, __nodebug__)) +_mm_maskz_xor_ps (__mmask8 __U, __m128 __A, __m128 __B) { + return (__m128) __builtin_ia32_xorps128_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +static __inline__ __m256d __attribute__ ((__always_inline__, __nodebug__)) +_mm256_mask_or_pd (__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d) __builtin_ia32_orpd256_mask ((__v4df) __A, + (__v4df) __B, + (__v4df) __W, + (__mmask8) __U); +} + +static __inline__ __m256d __attribute__ ((__always_inline__, __nodebug__)) +_mm256_maskz_or_pd (__mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d) __builtin_ia32_orpd256_mask ((__v4df) __A, + (__v4df) __B, + (__v4df) + _mm256_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m128d __attribute__ ((__always_inline__, __nodebug__)) +_mm_mask_or_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d) __builtin_ia32_orpd128_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) __W, + (__mmask8) __U); +} + +static __inline__ __m128d __attribute__ ((__always_inline__, __nodebug__)) +_mm_maskz_or_pd (__mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d) __builtin_ia32_orpd128_mask ((__v2df) __A, + (__v2df) __B, + (__v2df) + _mm_setzero_pd (), + (__mmask8) __U); +} + +static __inline__ __m256 __attribute__ ((__always_inline__, __nodebug__)) +_mm256_mask_or_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) { + return (__m256) __builtin_ia32_orps256_mask ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) __W, + (__mmask8) __U); +} + +static __inline__ __m256 __attribute__ ((__always_inline__, __nodebug__)) +_mm256_maskz_or_ps (__mmask8 __U, __m256 __A, __m256 __B) { + return (__m256) __builtin_ia32_orps256_mask ((__v8sf) __A, + (__v8sf) __B, + (__v8sf) + _mm256_setzero_ps (), + (__mmask8) __U); +} + +static __inline__ __m128 __attribute__ ((__always_inline__, __nodebug__)) +_mm_mask_or_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { + return (__m128) __builtin_ia32_orps128_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) __W, + (__mmask8) __U); +} + +static __inline__ __m128 __attribute__ ((__always_inline__, __nodebug__)) +_mm_maskz_or_ps (__mmask8 __U, __m128 __A, __m128 __B) { + return (__m128) __builtin_ia32_orps128_mask ((__v4sf) __A, + (__v4sf) __B, + (__v4sf) + _mm_setzero_ps (), + (__mmask8) __U); +} + +#endif /* __AVX512VLDQINTRIN_H */
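The header above wires the AVX512DQ-with-VL operations to their __builtin_ia32_* forms. For orientation, a minimal caller might look like the sketch below; it is illustrative only (not part of the diff), the helper names are hypothetical, and it assumes a compiler targeting -mavx512vl -mavx512dq:

#include <immintrin.h>

/* Lane-wise 64-bit multiply; native VPMULLQ with AVX512DQ+VL. */
static __m256i scale_counts(__m256i counts, __m256i factors) {
  return _mm256_mullo_epi64(counts, factors);
}

/* fabs() in the lanes selected by m only: ~(-0.0) keeps every bit except
   the sign, so andnot(-0.0, x) clears the sign bit; unselected lanes
   pass through src unchanged. */
static __m256d abs_selected(__m256d src, __mmask8 m, __m256d x) {
  return _mm256_mask_andnot_pd(src, m, _mm256_set1_pd(-0.0), x);
}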
diff --git a/contrib/llvm/tools/clang/lib/Headers/avx512vlintrin.h b/contrib/llvm/tools/clang/lib/Headers/avx512vlintrin.h index 8a374b1..9de0cf4 100644 --- a/contrib/llvm/tools/clang/lib/Headers/avx512vlintrin.h +++ b/contrib/llvm/tools/clang/lib/Headers/avx512vlintrin.h @@ -42,6 +42,17 @@ _mm_mask_cmpeq_epi32_mask(__mmask8 __u, __m128i __a, __m128i __b) { __u); } +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm_cmpeq_epu32_mask(__m128i __a, __m128i __b) { + return (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)__a, (__v4si)__b, 0, + (__mmask8)-1); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm_mask_cmpeq_epu32_mask(__mmask8 __u, __m128i __a, __m128i __b) { + return (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)__a, (__v4si)__b, 0, + __u); +} static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) _mm256_cmpeq_epi32_mask(__m256i __a, __m256i __b) { @@ -56,6 +67,18 @@ _mm256_mask_cmpeq_epi32_mask(__mmask8 __u, __m256i __a, __m256i __b) { } static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm256_cmpeq_epu32_mask(__m256i __a, __m256i __b) { + return (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)__a, (__v8si)__b, 0, + (__mmask8)-1); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm256_mask_cmpeq_epu32_mask(__mmask8 __u, __m256i __a, __m256i __b) { + return (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)__a, (__v8si)__b, 0, + __u); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) _mm_cmpeq_epi64_mask(__m128i __a, __m128i __b) { return (__mmask8)__builtin_ia32_pcmpeqq128_mask((__v2di)__a, (__v2di)__b, (__mmask8)-1); @@ -67,6 +90,17 @@ _mm_mask_cmpeq_epi64_mask(__mmask8 __u, __m128i __a, __m128i __b) { __u); } +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm_cmpeq_epu64_mask(__m128i __a, __m128i __b) { + return (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)__a, (__v2di)__b, 0, + (__mmask8)-1); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm_mask_cmpeq_epu64_mask(__mmask8 __u, __m128i __a, __m128i __b) { + return (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)__a, (__v2di)__b, 0, + __u); +} static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) _mm256_cmpeq_epi64_mask(__m256i __a, __m256i __b) { @@ -80,4 +114,1206 @@ _mm256_mask_cmpeq_epi64_mask(__mmask8 __u, __m256i __a, __m256i __b) { __u); } +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm256_cmpeq_epu64_mask(__m256i __a, __m256i __b) { + return (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)__a, (__v4di)__b, 0, + (__mmask8)-1); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm256_mask_cmpeq_epu64_mask(__mmask8 __u, __m256i __a, __m256i __b) { + return (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)__a, (__v4di)__b, 0, + __u); +} + + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm_cmpge_epi32_mask(__m128i __a, __m128i __b) { + return (__mmask8)__builtin_ia32_cmpd128_mask((__v4si)__a, (__v4si)__b, 5, + (__mmask8)-1); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm_mask_cmpge_epi32_mask(__mmask8 __u, __m128i __a, __m128i __b) { + return (__mmask8)__builtin_ia32_cmpd128_mask((__v4si)__a, (__v4si)__b, 5, + __u); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm_cmpge_epu32_mask(__m128i __a, __m128i __b) { + return (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)__a, (__v4si)__b, 5, + (__mmask8)-1); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm_mask_cmpge_epu32_mask(__mmask8 __u, __m128i __a, __m128i __b) { + return (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)__a, (__v4si)__b, 5, + __u); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__))
+_mm256_cmpge_epi32_mask(__m256i __a, __m256i __b) { + return (__mmask8)__builtin_ia32_cmpd256_mask((__v8si)__a, (__v8si)__b, 5, + (__mmask8)-1); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm256_mask_cmpge_epi32_mask(__mmask8 __u, __m256i __a, __m256i __b) { + return (__mmask8)__builtin_ia32_cmpd256_mask((__v8si)__a, (__v8si)__b, 5, + __u); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm256_cmpge_epu32_mask(__m256i __a, __m256i __b) { + return (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)__a, (__v8si)__b, 5, + (__mmask8)-1); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm256_mask_cmpge_epu32_mask(__mmask8 __u, __m256i __a, __m256i __b) { + return (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)__a, (__v8si)__b, 5, + __u); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm_cmpge_epi64_mask(__m128i __a, __m128i __b) { + return (__mmask8)__builtin_ia32_cmpq128_mask((__v2di)__a, (__v2di)__b, 5, + (__mmask8)-1); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm_mask_cmpge_epi64_mask(__mmask8 __u, __m128i __a, __m128i __b) { + return (__mmask8)__builtin_ia32_cmpq128_mask((__v2di)__a, (__v2di)__b, 5, + __u); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm_cmpge_epu64_mask(__m128i __a, __m128i __b) { + return (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)__a, (__v2di)__b, 5, + (__mmask8)-1); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm_mask_cmpge_epu64_mask(__mmask8 __u, __m128i __a, __m128i __b) { + return (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)__a, (__v2di)__b, 5, + __u); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm256_cmpge_epi64_mask(__m256i __a, __m256i __b) { + return (__mmask8)__builtin_ia32_cmpq256_mask((__v4di)__a, (__v4di)__b, 5, + (__mmask8)-1); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm256_mask_cmpge_epi64_mask(__mmask8 __u, __m256i __a, __m256i __b) { + return (__mmask8)__builtin_ia32_cmpq256_mask((__v4di)__a, (__v4di)__b, 5, + __u); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm256_cmpge_epu64_mask(__m256i __a, __m256i __b) { + return (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)__a, (__v4di)__b, 5, + (__mmask8)-1); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm256_mask_cmpge_epu64_mask(__mmask8 __u, __m256i __a, __m256i __b) { + return (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)__a, (__v4di)__b, 5, + __u); +} + + + + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm_cmpgt_epi32_mask(__m128i __a, __m128i __b) { + return (__mmask8)__builtin_ia32_pcmpgtd128_mask((__v4si)__a, (__v4si)__b, + (__mmask8)-1); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm_mask_cmpgt_epi32_mask(__mmask8 __u, __m128i __a, __m128i __b) { + return (__mmask8)__builtin_ia32_pcmpgtd128_mask((__v4si)__a, (__v4si)__b, + __u); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm_cmpgt_epu32_mask(__m128i __a, __m128i __b) { + return (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)__a, (__v4si)__b, 6, + (__mmask8)-1); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm_mask_cmpgt_epu32_mask(__mmask8 __u, __m128i __a, __m128i __b) { + return 
(__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)__a, (__v4si)__b, 6, + __u); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm256_cmpgt_epi32_mask(__m256i __a, __m256i __b) { + return (__mmask8)__builtin_ia32_pcmpgtd256_mask((__v8si)__a, (__v8si)__b, + (__mmask8)-1); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm256_mask_cmpgt_epi32_mask(__mmask8 __u, __m256i __a, __m256i __b) { + return (__mmask8)__builtin_ia32_pcmpgtd256_mask((__v8si)__a, (__v8si)__b, + __u); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm256_cmpgt_epu32_mask(__m256i __a, __m256i __b) { + return (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)__a, (__v8si)__b, 6, + (__mmask8)-1); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm256_mask_cmpgt_epu32_mask(__mmask8 __u, __m256i __a, __m256i __b) { + return (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)__a, (__v8si)__b, 6, + __u); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm_cmpgt_epi64_mask(__m128i __a, __m128i __b) { + return (__mmask8)__builtin_ia32_pcmpgtq128_mask((__v2di)__a, (__v2di)__b, + (__mmask8)-1); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm_mask_cmpgt_epi64_mask(__mmask8 __u, __m128i __a, __m128i __b) { + return (__mmask8)__builtin_ia32_pcmpgtq128_mask((__v2di)__a, (__v2di)__b, + __u); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm_cmpgt_epu64_mask(__m128i __a, __m128i __b) { + return (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)__a, (__v2di)__b, 6, + (__mmask8)-1); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm_mask_cmpgt_epu64_mask(__mmask8 __u, __m128i __a, __m128i __b) { + return (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)__a, (__v2di)__b, 6, + __u); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm256_cmpgt_epi64_mask(__m256i __a, __m256i __b) { + return (__mmask8)__builtin_ia32_pcmpgtq256_mask((__v4di)__a, (__v4di)__b, + (__mmask8)-1); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm256_mask_cmpgt_epi64_mask(__mmask8 __u, __m256i __a, __m256i __b) { + return (__mmask8)__builtin_ia32_pcmpgtq256_mask((__v4di)__a, (__v4di)__b, + __u); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm256_cmpgt_epu64_mask(__m256i __a, __m256i __b) { + return (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)__a, (__v4di)__b, 6, + (__mmask8)-1); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm256_mask_cmpgt_epu64_mask(__mmask8 __u, __m256i __a, __m256i __b) { + return (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)__a, (__v4di)__b, 6, + __u); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm_cmple_epi32_mask(__m128i __a, __m128i __b) { + return (__mmask8)__builtin_ia32_cmpd128_mask((__v4si)__a, (__v4si)__b, 2, + (__mmask8)-1); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm_mask_cmple_epi32_mask(__mmask8 __u, __m128i __a, __m128i __b) { + return (__mmask8)__builtin_ia32_cmpd128_mask((__v4si)__a, (__v4si)__b, 2, + __u); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm_cmple_epu32_mask(__m128i __a, __m128i __b) { + return (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)__a, (__v4si)__b, 2, + (__mmask8)-1); +} + +static 
__inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm_mask_cmple_epu32_mask(__mmask8 __u, __m128i __a, __m128i __b) { + return (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)__a, (__v4si)__b, 2, + __u); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm256_cmple_epi32_mask(__m256i __a, __m256i __b) { + return (__mmask8)__builtin_ia32_cmpd256_mask((__v8si)__a, (__v8si)__b, 2, + (__mmask8)-1); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm256_mask_cmple_epi32_mask(__mmask8 __u, __m256i __a, __m256i __b) { + return (__mmask8)__builtin_ia32_cmpd256_mask((__v8si)__a, (__v8si)__b, 2, + __u); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm256_cmple_epu32_mask(__m256i __a, __m256i __b) { + return (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)__a, (__v8si)__b, 2, + (__mmask8)-1); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm256_mask_cmple_epu32_mask(__mmask8 __u, __m256i __a, __m256i __b) { + return (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)__a, (__v8si)__b, 2, + __u); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm_cmple_epi64_mask(__m128i __a, __m128i __b) { + return (__mmask8)__builtin_ia32_cmpq128_mask((__v2di)__a, (__v2di)__b, 2, + (__mmask8)-1); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm_mask_cmple_epi64_mask(__mmask8 __u, __m128i __a, __m128i __b) { + return (__mmask8)__builtin_ia32_cmpq128_mask((__v2di)__a, (__v2di)__b, 2, + __u); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm_cmple_epu64_mask(__m128i __a, __m128i __b) { + return (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)__a, (__v2di)__b, 2, + (__mmask8)-1); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm_mask_cmple_epu64_mask(__mmask8 __u, __m128i __a, __m128i __b) { + return (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)__a, (__v2di)__b, 2, + __u); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm256_cmple_epi64_mask(__m256i __a, __m256i __b) { + return (__mmask8)__builtin_ia32_cmpq256_mask((__v4di)__a, (__v4di)__b, 2, + (__mmask8)-1); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm256_mask_cmple_epi64_mask(__mmask8 __u, __m256i __a, __m256i __b) { + return (__mmask8)__builtin_ia32_cmpq256_mask((__v4di)__a, (__v4di)__b, 2, + __u); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm256_cmple_epu64_mask(__m256i __a, __m256i __b) { + return (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)__a, (__v4di)__b, 2, + (__mmask8)-1); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm256_mask_cmple_epu64_mask(__mmask8 __u, __m256i __a, __m256i __b) { + return (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)__a, (__v4di)__b, 2, + __u); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm_cmplt_epi32_mask(__m128i __a, __m128i __b) { + return (__mmask8)__builtin_ia32_cmpd128_mask((__v4si)__a, (__v4si)__b, 1, + (__mmask8)-1); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm_mask_cmplt_epi32_mask(__mmask8 __u, __m128i __a, __m128i __b) { + return (__mmask8)__builtin_ia32_cmpd128_mask((__v4si)__a, (__v4si)__b, 1, + __u); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) 
+_mm_cmplt_epu32_mask(__m128i __a, __m128i __b) { + return (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)__a, (__v4si)__b, 1, + (__mmask8)-1); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm_mask_cmplt_epu32_mask(__mmask8 __u, __m128i __a, __m128i __b) { + return (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)__a, (__v4si)__b, 1, + __u); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm256_cmplt_epi32_mask(__m256i __a, __m256i __b) { + return (__mmask8)__builtin_ia32_cmpd256_mask((__v8si)__a, (__v8si)__b, 1, + (__mmask8)-1); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm256_mask_cmplt_epi32_mask(__mmask8 __u, __m256i __a, __m256i __b) { + return (__mmask8)__builtin_ia32_cmpd256_mask((__v8si)__a, (__v8si)__b, 1, + __u); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm256_cmplt_epu32_mask(__m256i __a, __m256i __b) { + return (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)__a, (__v8si)__b, 1, + (__mmask8)-1); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm256_mask_cmplt_epu32_mask(__mmask8 __u, __m256i __a, __m256i __b) { + return (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)__a, (__v8si)__b, 1, + __u); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm_cmplt_epi64_mask(__m128i __a, __m128i __b) { + return (__mmask8)__builtin_ia32_cmpq128_mask((__v2di)__a, (__v2di)__b, 1, + (__mmask8)-1); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm_mask_cmplt_epi64_mask(__mmask8 __u, __m128i __a, __m128i __b) { + return (__mmask8)__builtin_ia32_cmpq128_mask((__v2di)__a, (__v2di)__b, 1, + __u); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm_cmplt_epu64_mask(__m128i __a, __m128i __b) { + return (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)__a, (__v2di)__b, 1, + (__mmask8)-1); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm_mask_cmplt_epu64_mask(__mmask8 __u, __m128i __a, __m128i __b) { + return (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)__a, (__v2di)__b, 1, + __u); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm256_cmplt_epi64_mask(__m256i __a, __m256i __b) { + return (__mmask8)__builtin_ia32_cmpq256_mask((__v4di)__a, (__v4di)__b, 1, + (__mmask8)-1); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm256_mask_cmplt_epi64_mask(__mmask8 __u, __m256i __a, __m256i __b) { + return (__mmask8)__builtin_ia32_cmpq256_mask((__v4di)__a, (__v4di)__b, 1, + __u); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm256_cmplt_epu64_mask(__m256i __a, __m256i __b) { + return (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)__a, (__v4di)__b, 1, + (__mmask8)-1); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm256_mask_cmplt_epu64_mask(__mmask8 __u, __m256i __a, __m256i __b) { + return (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)__a, (__v4di)__b, 1, + __u); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm_cmpneq_epi32_mask(__m128i __a, __m128i __b) { + return (__mmask8)__builtin_ia32_cmpd128_mask((__v4si)__a, (__v4si)__b, 4, + (__mmask8)-1); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm_mask_cmpneq_epi32_mask(__mmask8 __u, __m128i __a, __m128i __b) { + return 
(__mmask8)__builtin_ia32_cmpd128_mask((__v4si)__a, (__v4si)__b, 4, + __u); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm_cmpneq_epu32_mask(__m128i __a, __m128i __b) { + return (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)__a, (__v4si)__b, 4, + (__mmask8)-1); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm_mask_cmpneq_epu32_mask(__mmask8 __u, __m128i __a, __m128i __b) { + return (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)__a, (__v4si)__b, 4, + __u); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm256_cmpneq_epi32_mask(__m256i __a, __m256i __b) { + return (__mmask8)__builtin_ia32_cmpd256_mask((__v8si)__a, (__v8si)__b, 4, + (__mmask8)-1); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm256_mask_cmpneq_epi32_mask(__mmask8 __u, __m256i __a, __m256i __b) { + return (__mmask8)__builtin_ia32_cmpd256_mask((__v8si)__a, (__v8si)__b, 4, + __u); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm256_cmpneq_epu32_mask(__m256i __a, __m256i __b) { + return (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)__a, (__v8si)__b, 4, + (__mmask8)-1); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm256_mask_cmpneq_epu32_mask(__mmask8 __u, __m256i __a, __m256i __b) { + return (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)__a, (__v8si)__b, 4, + __u); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm_cmpneq_epi64_mask(__m128i __a, __m128i __b) { + return (__mmask8)__builtin_ia32_cmpq128_mask((__v2di)__a, (__v2di)__b, 4, + (__mmask8)-1); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm_mask_cmpneq_epi64_mask(__mmask8 __u, __m128i __a, __m128i __b) { + return (__mmask8)__builtin_ia32_cmpq128_mask((__v2di)__a, (__v2di)__b, 4, + __u); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm_cmpneq_epu64_mask(__m128i __a, __m128i __b) { + return (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)__a, (__v2di)__b, 4, + (__mmask8)-1); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm_mask_cmpneq_epu64_mask(__mmask8 __u, __m128i __a, __m128i __b) { + return (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)__a, (__v2di)__b, 4, + __u); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm256_cmpneq_epi64_mask(__m256i __a, __m256i __b) { + return (__mmask8)__builtin_ia32_cmpq256_mask((__v4di)__a, (__v4di)__b, 4, + (__mmask8)-1); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm256_mask_cmpneq_epi64_mask(__mmask8 __u, __m256i __a, __m256i __b) { + return (__mmask8)__builtin_ia32_cmpq256_mask((__v4di)__a, (__v4di)__b, 4, + __u); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm256_cmpneq_epu64_mask(__m256i __a, __m256i __b) { + return (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)__a, (__v4di)__b, 4, + (__mmask8)-1); +} + +static __inline__ __mmask8 __attribute__((__always_inline__, __nodebug__)) +_mm256_mask_cmpneq_epu64_mask(__mmask8 __u, __m256i __a, __m256i __b) { + return (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)__a, (__v4di)__b, 4, + __u); +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_mask_add_epi32 (__m256i __W, __mmask8 __U, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_paddd256_mask ((__v8si) __A, + 
(__v8si) __B, + (__v8si) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_maskz_add_epi32 (__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_paddd256_mask ((__v8si) __A, + (__v8si) __B, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_mask_add_epi64 (__m256i __W, __mmask8 __U, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_paddq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_maskz_add_epi64 (__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_paddq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_mask_sub_epi32 (__m256i __W, __mmask8 __U, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_psubd256_mask ((__v8si) __A, + (__v8si) __B, + (__v8si) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_maskz_sub_epi32 (__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_psubd256_mask ((__v8si) __A, + (__v8si) __B, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_mask_sub_epi64 (__m256i __W, __mmask8 __U, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_psubq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_maskz_sub_epi64 (__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_psubq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +static __inline__ __m128i __attribute__((__always_inline__, __nodebug__)) +_mm_mask_add_epi32 (__m128i __W, __mmask8 __U, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_paddd128_mask ((__v4si) __A, + (__v4si) __B, + (__v4si) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __attribute__((__always_inline__, __nodebug__)) +_mm_maskz_add_epi32 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_paddd128_mask ((__v4si) __A, + (__v4si) __B, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +static __inline__ __m128i __attribute__((__always_inline__, __nodebug__)) +_mm_mask_add_epi64 (__m128i __W, __mmask8 __U, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_paddq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __attribute__((__always_inline__, __nodebug__)) +_mm_maskz_add_epi64 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_paddq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +static __inline__ __m128i __attribute__((__always_inline__, __nodebug__)) +_mm_mask_sub_epi32 (__m128i __W, __mmask8 __U, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_psubd128_mask ((__v4si) __A, + (__v4si) __B, + (__v4si) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __attribute__((__always_inline__, __nodebug__)) +_mm_maskz_sub_epi32 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i) 
__builtin_ia32_psubd128_mask ((__v4si) __A, + (__v4si) __B, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +static __inline__ __m128i __attribute__((__always_inline__, __nodebug__)) +_mm_mask_sub_epi64 (__m128i __W, __mmask8 __U, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_psubq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __attribute__((__always_inline__, __nodebug__)) +_mm_maskz_sub_epi64 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_psubq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_mask_mul_epi32 (__m256i __W, __mmask8 __M, __m256i __X, + __m256i __Y) +{ + return (__m256i) __builtin_ia32_pmuldq256_mask ((__v8si) __X, + (__v8si) __Y, + (__v4di) __W, __M); +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_maskz_mul_epi32 (__mmask8 __M, __m256i __X, __m256i __Y) +{ + return (__m256i) __builtin_ia32_pmuldq256_mask ((__v8si) __X, + (__v8si) __Y, + (__v4di) + _mm256_setzero_si256 (), + __M); +} + +static __inline__ __m128i __attribute__((__always_inline__, __nodebug__)) +_mm_mask_mul_epi32 (__m128i __W, __mmask8 __M, __m128i __X, + __m128i __Y) +{ + return (__m128i) __builtin_ia32_pmuldq128_mask ((__v4si) __X, + (__v4si) __Y, + (__v2di) __W, __M); +} + +static __inline__ __m128i __attribute__((__always_inline__, __nodebug__)) +_mm_maskz_mul_epi32 (__mmask8 __M, __m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_pmuldq128_mask ((__v4si) __X, + (__v4si) __Y, + (__v2di) + _mm_setzero_si128 (), + __M); +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_mask_mul_epu32 (__m256i __W, __mmask8 __M, __m256i __X, + __m256i __Y) +{ + return (__m256i) __builtin_ia32_pmuludq256_mask ((__v8si) __X, + (__v8si) __Y, + (__v4di) __W, __M); +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_maskz_mul_epu32 (__mmask8 __M, __m256i __X, __m256i __Y) +{ + return (__m256i) __builtin_ia32_pmuludq256_mask ((__v8si) __X, + (__v8si) __Y, + (__v4di) + _mm256_setzero_si256 (), + __M); +} + +static __inline__ __m128i __attribute__((__always_inline__, __nodebug__)) +_mm_mask_mul_epu32 (__m128i __W, __mmask8 __M, __m128i __X, + __m128i __Y) +{ + return (__m128i) __builtin_ia32_pmuludq128_mask ((__v4si) __X, + (__v4si) __Y, + (__v2di) __W, __M); +} + +static __inline__ __m128i __attribute__((__always_inline__, __nodebug__)) +_mm_maskz_mul_epu32 (__mmask8 __M, __m128i __X, __m128i __Y) +{ + return (__m128i) __builtin_ia32_pmuludq128_mask ((__v4si) __X, + (__v4si) __Y, + (__v2di) + _mm_setzero_si128 (), + __M); +} + +static __inline__ __m256i __attribute__ ((__always_inline__, __nodebug__)) +_mm256_maskz_mullo_epi32 (__mmask8 __M, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_pmulld256_mask ((__v8si) __A, + (__v8si) __B, + (__v8si) + _mm256_setzero_si256 (), + __M); +} + +static __inline__ __m256i __attribute__ ((__always_inline__, __nodebug__)) +_mm256_mask_mullo_epi32 (__m256i __W, __mmask8 __M, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_pmulld256_mask ((__v8si) __A, + (__v8si) __B, + (__v8si) __W, __M); +} + +static __inline__ __m128i __attribute__ ((__always_inline__, __nodebug__)) +_mm_maskz_mullo_epi32 (__mmask8 __M, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_pmulld128_mask ((__v4si) 
__A, + (__v4si) __B, + (__v4si) + _mm_setzero_si128 (), + __M); +} + +static __inline__ __m128i __attribute__ ((__always_inline__, __nodebug__)) +_mm_mask_mullo_epi32 (__m128i __W, __mmask8 __M, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_pmulld128_mask ((__v4si) __A, + (__v4si) __B, + (__v4si) __W, __M); +} + +static __inline__ __m256i __attribute__ ((__always_inline__, __nodebug__)) +_mm256_mask_and_epi32 (__m256i __W, __mmask8 __U, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_pandd256_mask ((__v8si) __A, + (__v8si) __B, + (__v8si) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __attribute__ ((__always_inline__, __nodebug__)) +_mm256_maskz_and_epi32 (__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_pandd256_mask ((__v8si) __A, + (__v8si) __B, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +static __inline__ __m128i __attribute__ ((__always_inline__, __nodebug__)) +_mm_mask_and_epi32 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_pandd128_mask ((__v4si) __A, + (__v4si) __B, + (__v4si) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __attribute__ ((__always_inline__, __nodebug__)) +_mm_maskz_and_epi32 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_pandd128_mask ((__v4si) __A, + (__v4si) __B, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +static __inline__ __m256i __attribute__ ((__always_inline__, __nodebug__)) +_mm256_mask_andnot_epi32 (__m256i __W, __mmask8 __U, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_pandnd256_mask ((__v8si) __A, + (__v8si) __B, + (__v8si) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __attribute__ ((__always_inline__, __nodebug__)) +_mm256_maskz_andnot_epi32 (__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_pandnd256_mask ((__v8si) __A, + (__v8si) __B, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +static __inline__ __m128i __attribute__ ((__always_inline__, __nodebug__)) +_mm_mask_andnot_epi32 (__m128i __W, __mmask8 __U, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_pandnd128_mask ((__v4si) __A, + (__v4si) __B, + (__v4si) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __attribute__ ((__always_inline__, __nodebug__)) +_mm_maskz_andnot_epi32 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_pandnd128_mask ((__v4si) __A, + (__v4si) __B, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +static __inline__ __m256i __attribute__ ((__always_inline__, __nodebug__)) +_mm256_mask_or_epi32 (__m256i __W, __mmask8 __U, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_pord256_mask ((__v8si) __A, + (__v8si) __B, + (__v8si) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __attribute__ ((__always_inline__, __nodebug__)) +_mm256_maskz_or_epi32 (__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_pord256_mask ((__v8si) __A, + (__v8si) __B, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +static __inline__ __m128i __attribute__ ((__always_inline__, __nodebug__)) +_mm_mask_or_epi32 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_pord128_mask ((__v4si) __A, + (__v4si) __B, + (__v4si) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __attribute__ ((__always_inline__, __nodebug__)) +_mm_maskz_or_epi32 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_pord128_mask ((__v4si) __A, + (__v4si) __B, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +}
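Throughout these definitions the _mm*_mask_* ("merge masking") forms take unselected lanes from __W, while the _mm*_maskz_* ("zero masking") forms force them to zero. A small sketch to make that concrete; it is illustrative only, the helper name is hypothetical, and it assumes a compiler targeting -mavx512vl:

#include <immintrin.h>

static __m128i masked_products(__m128i w, __m128i a, __m128i b) {
  __mmask8 low2 = 0x3;  /* select lanes 0 and 1 of the four epi32 lanes */
  __m128i merged = _mm_mask_mullo_epi32(w, low2, a, b);  /* lanes 2,3 come from w */
  __m128i zeroed = _mm_maskz_mullo_epi32(low2, a, b);    /* lanes 2,3 are 0 */
  return _mm_add_epi32(merged, zeroed);
}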
+ +static __inline__ __m256i __attribute__ ((__always_inline__, __nodebug__)) +_mm256_mask_xor_epi32 (__m256i __W, __mmask8 __U, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_pxord256_mask ((__v8si) __A, + (__v8si) __B, + (__v8si) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __attribute__ ((__always_inline__, __nodebug__)) +_mm256_maskz_xor_epi32 (__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_pxord256_mask ((__v8si) __A, + (__v8si) __B, + (__v8si) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +static __inline__ __m128i __attribute__ ((__always_inline__, __nodebug__)) +_mm_mask_xor_epi32 (__m128i __W, __mmask8 __U, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_pxord128_mask ((__v4si) __A, + (__v4si) __B, + (__v4si) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __attribute__ ((__always_inline__, __nodebug__)) +_mm_maskz_xor_epi32 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_pxord128_mask ((__v4si) __A, + (__v4si) __B, + (__v4si) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +static __inline__ __m256i __attribute__ ((__always_inline__, __nodebug__)) +_mm256_mask_and_epi64 (__m256i __W, __mmask8 __U, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_pandq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) __W, __U); +} + +static __inline__ __m256i __attribute__ ((__always_inline__, __nodebug__)) +_mm256_maskz_and_epi64 (__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_pandq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) + _mm256_setzero_si256 (), + __U); +} + +static __inline__ __m128i __attribute__ ((__always_inline__, __nodebug__)) +_mm_mask_and_epi64 (__m128i __W, __mmask8 __U, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_pandq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) __W, __U); +} + +static __inline__ __m128i __attribute__ ((__always_inline__, __nodebug__)) +_mm_maskz_and_epi64 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_pandq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) + _mm_setzero_si128 (), + __U); +} + +static __inline__ __m256i __attribute__ ((__always_inline__, __nodebug__)) +_mm256_mask_andnot_epi64 (__m256i __W, __mmask8 __U, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_pandnq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) __W, __U); +} + +static __inline__ __m256i __attribute__ ((__always_inline__, __nodebug__)) +_mm256_maskz_andnot_epi64 (__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_pandnq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) + _mm256_setzero_si256 (), + __U); +} + +static __inline__ __m128i __attribute__ ((__always_inline__, __nodebug__)) +_mm_mask_andnot_epi64 (__m128i __W, __mmask8 __U, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_pandnq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) __W, __U); +} + +static __inline__ __m128i __attribute__ ((__always_inline__, __nodebug__)) +_mm_maskz_andnot_epi64 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_pandnq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) + _mm_setzero_si128 (), + __U); +} + +static __inline__ __m256i __attribute__ ((__always_inline__, __nodebug__)) +_mm256_mask_or_epi64 (__m256i __W, __mmask8 __U, __m256i __A, + __m256i __B) +{ + return
(__m256i) __builtin_ia32_porq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __attribute__ ((__always_inline__, __nodebug__)) +_mm256_maskz_or_epi64 (__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_porq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +static __inline__ __m128i __attribute__ ((__always_inline__, __nodebug__)) +_mm_mask_or_epi64 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_porq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __attribute__ ((__always_inline__, __nodebug__)) +_mm_maskz_or_epi64 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_porq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +static __inline__ __m256i __attribute__ ((__always_inline__, __nodebug__)) +_mm256_mask_xor_epi64 (__m256i __W, __mmask8 __U, __m256i __A, + __m256i __B) +{ + return (__m256i) __builtin_ia32_pxorq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) __W, + (__mmask8) __U); +} + +static __inline__ __m256i __attribute__ ((__always_inline__, __nodebug__)) +_mm256_maskz_xor_epi64 (__mmask8 __U, __m256i __A, __m256i __B) +{ + return (__m256i) __builtin_ia32_pxorq256_mask ((__v4di) __A, + (__v4di) __B, + (__v4di) + _mm256_setzero_si256 (), + (__mmask8) __U); +} + +static __inline__ __m128i __attribute__ ((__always_inline__, __nodebug__)) +_mm_mask_xor_epi64 (__m128i __W, __mmask8 __U, __m128i __A, + __m128i __B) +{ + return (__m128i) __builtin_ia32_pxorq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) __W, + (__mmask8) __U); +} + +static __inline__ __m128i __attribute__ ((__always_inline__, __nodebug__)) +_mm_maskz_xor_epi64 (__mmask8 __U, __m128i __A, __m128i __B) +{ + return (__m128i) __builtin_ia32_pxorq128_mask ((__v2di) __A, + (__v2di) __B, + (__v2di) + _mm_setzero_si128 (), + (__mmask8) __U); +} + +#define _mm_cmp_epi32_mask(a, b, p) __extension__ ({ \ + (__mmask8)__builtin_ia32_cmpd128_mask((__v4si)(__m128i)(a), \ + (__v4si)(__m128i)(b), \ + (p), (__mmask8)-1); }) + +#define _mm_mask_cmp_epi32_mask(m, a, b, p) __extension__ ({ \ + (__mmask8)__builtin_ia32_cmpd128_mask((__v4si)(__m128i)(a), \ + (__v4si)(__m128i)(b), \ + (p), (__mmask8)(m)); }) + +#define _mm_cmp_epu32_mask(a, b, p) __extension__ ({ \ + (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)(__m128i)(a), \ + (__v4si)(__m128i)(b), \ + (p), (__mmask8)-1); }) + +#define _mm_mask_cmp_epu32_mask(m, a, b, p) __extension__ ({ \ + (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)(__m128i)(a), \ + (__v4si)(__m128i)(b), \ + (p), (__mmask8)(m)); }) + +#define _mm256_cmp_epi32_mask(a, b, p) __extension__ ({ \ + (__mmask8)__builtin_ia32_cmpd256_mask((__v8si)(__m256i)(a), \ + (__v8si)(__m256i)(b), \ + (p), (__mmask8)-1); }) + +#define _mm256_mask_cmp_epi32_mask(m, a, b, p) __extension__ ({ \ + (__mmask8)__builtin_ia32_cmpd256_mask((__v8si)(__m256i)(a), \ + (__v8si)(__m256i)(b), \ + (p), (__mmask8)(m)); }) + +#define _mm256_cmp_epu32_mask(a, b, p) __extension__ ({ \ + (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)(__m256i)(a), \ + (__v8si)(__m256i)(b), \ + (p), (__mmask8)-1); }) + +#define _mm256_mask_cmp_epu32_mask(m, a, b, p) __extension__ ({ \ + (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)(__m256i)(a), \ + (__v8si)(__m256i)(b), \ + (p), (__mmask8)(m)); })
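In the _mm*_cmp_* macros that follow, p must be a compile-time constant predicate in 0..7; the named wrappers earlier in this header use 0 for eq, 1 for lt, 2 for le, 4 for neq, 5 for ge, and 6 for gt. A brief usage sketch (illustrative only, hypothetical helper name, assumes -mavx512vl):

#include <immintrin.h>

/* Count the unsigned 32-bit lanes of a that are >= the matching lane of b. */
static int count_ge_lanes(__m256i a, __m256i b) {
  __mmask8 m = _mm256_cmp_epu32_mask(a, b, 5);  /* predicate 5: not-less-than */
  return __builtin_popcount((unsigned)m);
}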
+ +#define _mm_cmp_epi64_mask(a, b, p) __extension__ ({ \ + (__mmask8)__builtin_ia32_cmpq128_mask((__v2di)(__m128i)(a), \ + (__v2di)(__m128i)(b), \ + (p), (__mmask8)-1); }) + +#define _mm_mask_cmp_epi64_mask(m, a, b, p) __extension__ ({ \ + (__mmask8)__builtin_ia32_cmpq128_mask((__v2di)(__m128i)(a), \ + (__v2di)(__m128i)(b), \ + (p), (__mmask8)(m)); }) + +#define _mm_cmp_epu64_mask(a, b, p) __extension__ ({ \ + (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)(__m128i)(a), \ + (__v2di)(__m128i)(b), \ + (p), (__mmask8)-1); }) + +#define _mm_mask_cmp_epu64_mask(m, a, b, p) __extension__ ({ \ + (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)(__m128i)(a), \ + (__v2di)(__m128i)(b), \ + (p), (__mmask8)(m)); }) + +#define _mm256_cmp_epi64_mask(a, b, p) __extension__ ({ \ + (__mmask8)__builtin_ia32_cmpq256_mask((__v4di)(__m256i)(a), \ + (__v4di)(__m256i)(b), \ + (p), (__mmask8)-1); }) + +#define _mm256_mask_cmp_epi64_mask(m, a, b, p) __extension__ ({ \ + (__mmask8)__builtin_ia32_cmpq256_mask((__v4di)(__m256i)(a), \ + (__v4di)(__m256i)(b), \ + (p), (__mmask8)(m)); }) + +#define _mm256_cmp_epu64_mask(a, b, p) __extension__ ({ \ + (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)(__m256i)(a), \ + (__v4di)(__m256i)(b), \ + (p), (__mmask8)-1); }) + +#define _mm256_mask_cmp_epu64_mask(m, a, b, p) __extension__ ({ \ + (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)(__m256i)(a), \ + (__v4di)(__m256i)(b), \ + (p), (__mmask8)(m)); }) + +#define _mm256_cmp_ps_mask(a, b, p) __extension__ ({ \ + (__mmask8)__builtin_ia32_cmpps256_mask((__v8sf)(__m256)(a), \ + (__v8sf)(__m256)(b), \ + (p), (__mmask8)-1); }) + +#define _mm256_mask_cmp_ps_mask(m, a, b, p) __extension__ ({ \ + (__mmask8)__builtin_ia32_cmpps256_mask((__v8sf)(__m256)(a), \ + (__v8sf)(__m256)(b), \ + (p), (__mmask8)(m)); }) + +#define _mm256_cmp_pd_mask(a, b, p) __extension__ ({ \ + (__mmask8)__builtin_ia32_cmppd256_mask((__v4df)(__m256d)(a), \ + (__v4df)(__m256d)(b), \ + (p), (__mmask8)-1); }) + +#define _mm256_mask_cmp_pd_mask(m, a, b, p) __extension__ ({ \ + (__mmask8)__builtin_ia32_cmppd256_mask((__v4df)(__m256d)(a), \ + (__v4df)(__m256d)(b), \ + (p), (__mmask8)(m)); }) + +#define _mm128_cmp_ps_mask(a, b, p) __extension__ ({ \ + (__mmask8)__builtin_ia32_cmpps128_mask((__v4sf)(__m128)(a), \ + (__v4sf)(__m128)(b), \ + (p), (__mmask8)-1); }) + +#define _mm128_mask_cmp_ps_mask(m, a, b, p) __extension__ ({ \ + (__mmask8)__builtin_ia32_cmpps128_mask((__v4sf)(__m128)(a), \ + (__v4sf)(__m128)(b), \ + (p), (__mmask8)(m)); }) + +#define _mm128_cmp_pd_mask(a, b, p) __extension__ ({ \ + (__mmask8)__builtin_ia32_cmppd128_mask((__v2df)(__m128d)(a), \ + (__v2df)(__m128d)(b), \ + (p), (__mmask8)-1); }) + +#define _mm128_mask_cmp_pd_mask(m, a, b, p) __extension__ ({ \ + (__mmask8)__builtin_ia32_cmppd128_mask((__v2df)(__m128d)(a), \ + (__v2df)(__m128d)(b), \ + (p), (__mmask8)(m)); }) #endif /* __AVX512VLINTRIN_H */ diff --git a/contrib/llvm/tools/clang/lib/Headers/avxintrin.h b/contrib/llvm/tools/clang/lib/Headers/avxintrin.h index 4e1044a..4907965 100644 --- a/contrib/llvm/tools/clang/lib/Headers/avxintrin.h +++ b/contrib/llvm/tools/clang/lib/Headers/avxintrin.h @@ -257,8 +257,7 @@ _mm_permutevar_ps(__m128 __a, __m128i __c) static __inline __m256 __attribute__((__always_inline__, __nodebug__)) _mm256_permutevar_ps(__m256 __a, __m256i __c) { - return (__m256)__builtin_ia32_vpermilvarps256((__v8sf)__a, - (__v8si)__c); + return (__m256)__builtin_ia32_vpermilvarps256((__v8sf)__a, (__v8si)__c); } #define _mm_permute_pd(A, C) __extension__ ({ \ @@ -430,35 +429,22 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c) __m128 __b = (b); \
(__m128)__builtin_ia32_cmpss((__v4sf)__a, (__v4sf)__b, (c)); }) -/* Vector extract */ -#define _mm256_extractf128_pd(A, O) __extension__ ({ \ - __m256d __A = (A); \ - (__m128d)__builtin_ia32_vextractf128_pd256((__v4df)__A, (O)); }) - -#define _mm256_extractf128_ps(A, O) __extension__ ({ \ - __m256 __A = (A); \ - (__m128)__builtin_ia32_vextractf128_ps256((__v8sf)__A, (O)); }) - -#define _mm256_extractf128_si256(A, O) __extension__ ({ \ - __m256i __A = (A); \ - (__m128i)__builtin_ia32_vextractf128_si256((__v8si)__A, (O)); }) - static __inline int __attribute__((__always_inline__, __nodebug__)) -_mm256_extract_epi32(__m256i __a, int const __imm) +_mm256_extract_epi32(__m256i __a, const int __imm) { __v8si __b = (__v8si)__a; return __b[__imm & 7]; } static __inline int __attribute__((__always_inline__, __nodebug__)) -_mm256_extract_epi16(__m256i __a, int const __imm) +_mm256_extract_epi16(__m256i __a, const int __imm) { __v16hi __b = (__v16hi)__a; return __b[__imm & 15]; } static __inline int __attribute__((__always_inline__, __nodebug__)) -_mm256_extract_epi8(__m256i __a, int const __imm) +_mm256_extract_epi8(__m256i __a, const int __imm) { __v32qi __b = (__v32qi)__a; return __b[__imm & 31]; @@ -473,22 +459,6 @@ _mm256_extract_epi64(__m256i __a, const int __imm) } #endif -/* Vector insert */ -#define _mm256_insertf128_pd(V1, V2, O) __extension__ ({ \ - __m256d __V1 = (V1); \ - __m128d __V2 = (V2); \ - (__m256d)__builtin_ia32_vinsertf128_pd256((__v4df)__V1, (__v2df)__V2, (O)); }) - -#define _mm256_insertf128_ps(V1, V2, O) __extension__ ({ \ - __m256 __V1 = (V1); \ - __m128 __V2 = (V2); \ - (__m256)__builtin_ia32_vinsertf128_ps256((__v8sf)__V1, (__v4sf)__V2, (O)); }) - -#define _mm256_insertf128_si256(V1, V2, O) __extension__ ({ \ - __m256i __V1 = (V1); \ - __m128i __V2 = (V2); \ - (__m256i)__builtin_ia32_vinsertf128_si256((__v8si)__V1, (__v4si)__V2, (O)); }) - static __inline __m256i __attribute__((__always_inline__, __nodebug__)) _mm256_insert_epi32(__m256i __a, int __b, int const __imm) { @@ -515,7 +485,7 @@ _mm256_insert_epi8(__m256i __a, int __b, int const __imm) #ifdef __x86_64__ static __inline __m256i __attribute__((__always_inline__, __nodebug__)) -_mm256_insert_epi64(__m256i __a, int __b, int const __imm) +_mm256_insert_epi64(__m256i __a, long long __b, int const __imm) { __v4di __c = (__v4di)__a; __c[__imm & 3] = __b; @@ -785,7 +755,7 @@ _mm256_loadu_pd(double const *__p) { struct __loadu_pd { __m256d __v; - } __attribute__((packed, may_alias)); + } __attribute__((__packed__, __may_alias__)); return ((struct __loadu_pd*)__p)->__v; } @@ -794,7 +764,7 @@ _mm256_loadu_ps(float const *__p) { struct __loadu_ps { __m256 __v; - } __attribute__((packed, may_alias)); + } __attribute__((__packed__, __may_alias__)); return ((struct __loadu_ps*)__p)->__v; } @@ -809,7 +779,7 @@ _mm256_loadu_si256(__m256i const *__p) { struct __loadu_si256 { __m256i __v; - } __attribute__((packed, may_alias)); + } __attribute__((__packed__, __may_alias__)); return ((struct __loadu_si256*)__p)->__v; } @@ -935,23 +905,23 @@ _mm256_set_pd(double __a, double __b, double __c, double __d) static __inline __m256 __attribute__((__always_inline__, __nodebug__)) _mm256_set_ps(float __a, float __b, float __c, float __d, - float __e, float __f, float __g, float __h) + float __e, float __f, float __g, float __h) { return (__m256){ __h, __g, __f, __e, __d, __c, __b, __a }; } static __inline __m256i __attribute__((__always_inline__, __nodebug__)) _mm256_set_epi32(int __i0, int __i1, int __i2, int __i3, - int __i4, int __i5, int 
__i6, int __i7) + int __i4, int __i5, int __i6, int __i7) { return (__m256i)(__v8si){ __i7, __i6, __i5, __i4, __i3, __i2, __i1, __i0 }; } static __inline __m256i __attribute__((__always_inline__, __nodebug__)) _mm256_set_epi16(short __w15, short __w14, short __w13, short __w12, - short __w11, short __w10, short __w09, short __w08, - short __w07, short __w06, short __w05, short __w04, - short __w03, short __w02, short __w01, short __w00) + short __w11, short __w10, short __w09, short __w08, + short __w07, short __w06, short __w05, short __w04, + short __w03, short __w02, short __w01, short __w00) { return (__m256i)(__v16hi){ __w00, __w01, __w02, __w03, __w04, __w05, __w06, __w07, __w08, __w09, __w10, __w11, __w12, __w13, __w14, __w15 }; @@ -959,13 +929,13 @@ _mm256_set_epi16(short __w15, short __w14, short __w13, short __w12, static __inline __m256i __attribute__((__always_inline__, __nodebug__)) _mm256_set_epi8(char __b31, char __b30, char __b29, char __b28, - char __b27, char __b26, char __b25, char __b24, - char __b23, char __b22, char __b21, char __b20, - char __b19, char __b18, char __b17, char __b16, - char __b15, char __b14, char __b13, char __b12, - char __b11, char __b10, char __b09, char __b08, - char __b07, char __b06, char __b05, char __b04, - char __b03, char __b02, char __b01, char __b00) + char __b27, char __b26, char __b25, char __b24, + char __b23, char __b22, char __b21, char __b20, + char __b19, char __b18, char __b17, char __b16, + char __b15, char __b14, char __b13, char __b12, + char __b11, char __b10, char __b09, char __b08, + char __b07, char __b06, char __b05, char __b04, + char __b03, char __b02, char __b01, char __b00) { return (__m256i)(__v32qi){ __b00, __b01, __b02, __b03, __b04, __b05, __b06, __b07, @@ -990,23 +960,23 @@ _mm256_setr_pd(double __a, double __b, double __c, double __d) static __inline __m256 __attribute__((__always_inline__, __nodebug__)) _mm256_setr_ps(float __a, float __b, float __c, float __d, - float __e, float __f, float __g, float __h) + float __e, float __f, float __g, float __h) { return (__m256){ __a, __b, __c, __d, __e, __f, __g, __h }; } static __inline __m256i __attribute__((__always_inline__, __nodebug__)) _mm256_setr_epi32(int __i0, int __i1, int __i2, int __i3, - int __i4, int __i5, int __i6, int __i7) + int __i4, int __i5, int __i6, int __i7) { return (__m256i)(__v8si){ __i0, __i1, __i2, __i3, __i4, __i5, __i6, __i7 }; } static __inline __m256i __attribute__((__always_inline__, __nodebug__)) _mm256_setr_epi16(short __w15, short __w14, short __w13, short __w12, - short __w11, short __w10, short __w09, short __w08, - short __w07, short __w06, short __w05, short __w04, - short __w03, short __w02, short __w01, short __w00) + short __w11, short __w10, short __w09, short __w08, + short __w07, short __w06, short __w05, short __w04, + short __w03, short __w02, short __w01, short __w00) { return (__m256i)(__v16hi){ __w15, __w14, __w13, __w12, __w11, __w10, __w09, __w08, __w07, __w06, __w05, __w04, __w03, __w02, __w01, __w00 }; @@ -1014,19 +984,19 @@ _mm256_setr_epi16(short __w15, short __w14, short __w13, short __w12, static __inline __m256i __attribute__((__always_inline__, __nodebug__)) _mm256_setr_epi8(char __b31, char __b30, char __b29, char __b28, - char __b27, char __b26, char __b25, char __b24, - char __b23, char __b22, char __b21, char __b20, - char __b19, char __b18, char __b17, char __b16, - char __b15, char __b14, char __b13, char __b12, - char __b11, char __b10, char __b09, char __b08, - char __b07, char __b06, char __b05, char 
__b04, - char __b03, char __b02, char __b01, char __b00) + char __b27, char __b26, char __b25, char __b24, + char __b23, char __b22, char __b21, char __b20, + char __b19, char __b18, char __b17, char __b16, + char __b15, char __b14, char __b13, char __b12, + char __b11, char __b10, char __b09, char __b08, + char __b07, char __b06, char __b05, char __b04, + char __b03, char __b02, char __b01, char __b00) { return (__m256i)(__v32qi){ __b31, __b30, __b29, __b28, __b27, __b26, __b25, __b24, - __b23, __b22, __b21, __b20, __b19, __b18, __b17, __b16, - __b15, __b14, __b13, __b12, __b11, __b10, __b09, __b08, - __b07, __b06, __b05, __b04, __b03, __b02, __b01, __b00 }; + __b23, __b22, __b21, __b20, __b19, __b18, __b17, __b16, + __b15, __b14, __b13, __b12, __b11, __b10, __b09, __b08, + __b07, __b06, __b05, __b04, __b03, __b02, __b01, __b00 }; } static __inline __m256i __attribute__((__always_inline__, __nodebug__)) @@ -1167,6 +1137,70 @@ _mm256_castsi128_si256(__m128i __a) return __builtin_shufflevector(__a, __a, 0, 1, -1, -1); } +/* + Vector insert. + We use macros rather than inlines because we only want to accept + invocations where the immediate M is a constant expression. +*/ +#define _mm256_insertf128_ps(V1, V2, M) __extension__ ({ \ + (__m256)__builtin_shufflevector( \ + (__v8sf)(V1), \ + (__v8sf)_mm256_castps128_ps256((__m128)(V2)), \ + (((M) & 1) ? 0 : 8), \ + (((M) & 1) ? 1 : 9), \ + (((M) & 1) ? 2 : 10), \ + (((M) & 1) ? 3 : 11), \ + (((M) & 1) ? 8 : 4), \ + (((M) & 1) ? 9 : 5), \ + (((M) & 1) ? 10 : 6), \ + (((M) & 1) ? 11 : 7) );}) + +#define _mm256_insertf128_pd(V1, V2, M) __extension__ ({ \ + (__m256d)__builtin_shufflevector( \ + (__v4df)(V1), \ + (__v4df)_mm256_castpd128_pd256((__m128d)(V2)), \ + (((M) & 1) ? 0 : 4), \ + (((M) & 1) ? 1 : 5), \ + (((M) & 1) ? 4 : 2), \ + (((M) & 1) ? 5 : 3) );}) + +#define _mm256_insertf128_si256(V1, V2, M) __extension__ ({ \ + (__m256i)__builtin_shufflevector( \ + (__v4di)(V1), \ + (__v4di)_mm256_castsi128_si256((__m128i)(V2)), \ + (((M) & 1) ? 0 : 4), \ + (((M) & 1) ? 1 : 5), \ + (((M) & 1) ? 4 : 2), \ + (((M) & 1) ? 5 : 3) );}) + +/* + Vector extract. + We use macros rather than inlines because we only want to accept + invocations where the immediate M is a constant expression. +*/ +#define _mm256_extractf128_ps(V, M) __extension__ ({ \ + (__m128)__builtin_shufflevector( \ + (__v8sf)(V), \ + (__v8sf)(_mm256_setzero_ps()), \ + (((M) & 1) ? 4 : 0), \ + (((M) & 1) ? 5 : 1), \ + (((M) & 1) ? 6 : 2), \ + (((M) & 1) ? 7 : 3) );}) + +#define _mm256_extractf128_pd(V, M) __extension__ ({ \ + (__m128d)__builtin_shufflevector( \ + (__v4df)(V), \ + (__v4df)(_mm256_setzero_pd()), \ + (((M) & 1) ? 2 : 0), \ + (((M) & 1) ? 3 : 1) );}) + +#define _mm256_extractf128_si256(V, M) __extension__ ({ \ + (__m128i)__builtin_shufflevector( \ + (__v4di)(V), \ + (__v4di)(_mm256_setzero_si256()), \ + (((M) & 1) ? 2 : 0), \ + (((M) & 1) ? 
3 : 1) );}) + /* SIMD load ops (unaligned) */ static __inline __m256 __attribute__((__always_inline__, __nodebug__)) _mm256_loadu2_m128(float const *__addr_hi, float const *__addr_lo) @@ -1195,7 +1229,7 @@ _mm256_loadu2_m128i(__m128i const *__addr_hi, __m128i const *__addr_lo) { struct __loadu_si128 { __m128i __v; - } __attribute__((packed, may_alias)); + } __attribute__((__packed__, __may_alias__)); __m256i __v256 = _mm256_castsi128_si256( ((struct __loadu_si128*)__addr_lo)->__v); return _mm256_insertf128_si256(__v256, @@ -1236,4 +1270,34 @@ _mm256_storeu2_m128i(__m128i *__addr_hi, __m128i *__addr_lo, __m256i __a) __builtin_ia32_storedqu((char *)__addr_hi, (__v16qi)__v128); } +static __inline __m256 __attribute__((__always_inline__, __nodebug__)) +_mm256_set_m128 (__m128 __hi, __m128 __lo) { + return (__m256) __builtin_shufflevector(__lo, __hi, 0, 1, 2, 3, 4, 5, 6, 7); +} + +static __inline __m256d __attribute__((__always_inline__, __nodebug__)) +_mm256_set_m128d (__m128d __hi, __m128d __lo) { + return (__m256d)_mm256_set_m128((__m128)__hi, (__m128)__lo); +} + +static __inline __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_set_m128i (__m128i __hi, __m128i __lo) { + return (__m256i)_mm256_set_m128((__m128)__hi, (__m128)__lo); +} + +static __inline __m256 __attribute__((__always_inline__, __nodebug__)) +_mm256_setr_m128 (__m128 __lo, __m128 __hi) { + return _mm256_set_m128(__hi, __lo); +} + +static __inline __m256d __attribute__((__always_inline__, __nodebug__)) +_mm256_setr_m128d (__m128d __lo, __m128d __hi) { + return (__m256d)_mm256_set_m128((__m128)__hi, (__m128)__lo); +} + +static __inline __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_setr_m128i (__m128i __lo, __m128i __hi) { + return (__m256i)_mm256_set_m128((__m128)__hi, (__m128)__lo); +} + #endif /* __AVXINTRIN_H */ diff --git a/contrib/llvm/tools/clang/lib/Headers/cuda_builtin_vars.h b/contrib/llvm/tools/clang/lib/Headers/cuda_builtin_vars.h new file mode 100644 index 0000000..901356b --- /dev/null +++ b/contrib/llvm/tools/clang/lib/Headers/cuda_builtin_vars.h @@ -0,0 +1,110 @@ +/*===---- cuda_builtin_vars.h - CUDA built-in variables ---------------------=== + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __CUDA_BUILTIN_VARS_H +#define __CUDA_BUILTIN_VARS_H + +// The file implements built-in CUDA variables using __declspec(property). 
+// https://msdn.microsoft.com/en-us/library/yhfk0thd.aspx +// All read accesses of built-in variable fields get converted into calls to a +// getter function which in turn would call the appropriate builtin to fetch +// the value. +// +// Example: +// int x = threadIdx.x; +// IR output: +// %0 = call i32 @llvm.ptx.read.tid.x() #3 +// PTX output: +// mov.u32 %r2, %tid.x; + +#define __CUDA_DEVICE_BUILTIN(FIELD, INTRINSIC) \ + __declspec(property(get = __fetch_builtin_##FIELD)) unsigned int FIELD; \ + static inline __attribute__((always_inline)) \ + __attribute__((device)) unsigned int __fetch_builtin_##FIELD(void) { \ + return INTRINSIC; \ + } + +#if __cplusplus >= 201103L +#define __DELETE =delete +#else +#define __DELETE +#endif + +// Make sure nobody can create instances of the special variable types. nvcc +// also disallows taking the address of special variables, so we disable the +// address-of operator as well. +#define __CUDA_DISALLOW_BUILTINVAR_ACCESS(TypeName) \ + __attribute__((device)) TypeName() __DELETE; \ + __attribute__((device)) TypeName(const TypeName &) __DELETE; \ + __attribute__((device)) void operator=(const TypeName &) const __DELETE; \ + __attribute__((device)) TypeName *operator&() const __DELETE + +struct __cuda_builtin_threadIdx_t { + __CUDA_DEVICE_BUILTIN(x,__builtin_ptx_read_tid_x()); + __CUDA_DEVICE_BUILTIN(y,__builtin_ptx_read_tid_y()); + __CUDA_DEVICE_BUILTIN(z,__builtin_ptx_read_tid_z()); +private: + __CUDA_DISALLOW_BUILTINVAR_ACCESS(__cuda_builtin_threadIdx_t); +}; + +struct __cuda_builtin_blockIdx_t { + __CUDA_DEVICE_BUILTIN(x,__builtin_ptx_read_ctaid_x()); + __CUDA_DEVICE_BUILTIN(y,__builtin_ptx_read_ctaid_y()); + __CUDA_DEVICE_BUILTIN(z,__builtin_ptx_read_ctaid_z()); +private: + __CUDA_DISALLOW_BUILTINVAR_ACCESS(__cuda_builtin_blockIdx_t); +}; + +struct __cuda_builtin_blockDim_t { + __CUDA_DEVICE_BUILTIN(x,__builtin_ptx_read_ntid_x()); + __CUDA_DEVICE_BUILTIN(y,__builtin_ptx_read_ntid_y()); + __CUDA_DEVICE_BUILTIN(z,__builtin_ptx_read_ntid_z()); +private: + __CUDA_DISALLOW_BUILTINVAR_ACCESS(__cuda_builtin_blockDim_t); +}; + +struct __cuda_builtin_gridDim_t { + __CUDA_DEVICE_BUILTIN(x,__builtin_ptx_read_nctaid_x()); + __CUDA_DEVICE_BUILTIN(y,__builtin_ptx_read_nctaid_y()); + __CUDA_DEVICE_BUILTIN(z,__builtin_ptx_read_nctaid_z()); +private: + __CUDA_DISALLOW_BUILTINVAR_ACCESS(__cuda_builtin_gridDim_t); +}; + +#define __CUDA_BUILTIN_VAR \ + extern const __attribute__((device)) __attribute__((weak)) +__CUDA_BUILTIN_VAR __cuda_builtin_threadIdx_t threadIdx; +__CUDA_BUILTIN_VAR __cuda_builtin_blockIdx_t blockIdx; +__CUDA_BUILTIN_VAR __cuda_builtin_blockDim_t blockDim; +__CUDA_BUILTIN_VAR __cuda_builtin_gridDim_t gridDim; + +// warpSize should translate to a read of %WARP_SZ but there's currently no +// builtin to do so. According to PTX v4.2 docs 'to date, all target +// architectures have a WARP_SZ value of 32'.
+__attribute__((device)) const int warpSize = 32; + +#undef __CUDA_DEVICE_BUILTIN +#undef __CUDA_BUILTIN_VAR +#undef __CUDA_DISALLOW_BUILTINVAR_ACCESS + +#endif /* __CUDA_BUILTIN_VARS_H */ diff --git a/contrib/llvm/tools/clang/lib/Headers/emmintrin.h b/contrib/llvm/tools/clang/lib/Headers/emmintrin.h index 28d0043..c764d68 100644 --- a/contrib/llvm/tools/clang/lib/Headers/emmintrin.h +++ b/contrib/llvm/tools/clang/lib/Headers/emmintrin.h @@ -489,7 +489,7 @@ _mm_loadu_pd(double const *__dp) { struct __loadu_pd { __m128d __v; - } __attribute__((packed, may_alias)); + } __attribute__((__packed__, __may_alias__)); return ((struct __loadu_pd*)__dp)->__v; } @@ -825,11 +825,28 @@ _mm_xor_si128(__m128i __a, __m128i __b) return __a ^ __b; } -#define _mm_slli_si128(a, count) __extension__ ({ \ - _Pragma("clang diagnostic push") _Pragma("clang diagnostic ignored \"-Wshadow\""); \ - __m128i __a = (a); \ - _Pragma("clang diagnostic pop"); \ - (__m128i)__builtin_ia32_pslldqi128(__a, (count)*8); }) +#define _mm_slli_si128(a, imm) __extension__ ({ \ + (__m128i)__builtin_shufflevector((__v16qi)_mm_setzero_si128(), \ + (__v16qi)(__m128i)(a), \ + ((imm)&0xF0) ? 0 : 16 - ((imm)&0xF), \ + ((imm)&0xF0) ? 0 : 17 - ((imm)&0xF), \ + ((imm)&0xF0) ? 0 : 18 - ((imm)&0xF), \ + ((imm)&0xF0) ? 0 : 19 - ((imm)&0xF), \ + ((imm)&0xF0) ? 0 : 20 - ((imm)&0xF), \ + ((imm)&0xF0) ? 0 : 21 - ((imm)&0xF), \ + ((imm)&0xF0) ? 0 : 22 - ((imm)&0xF), \ + ((imm)&0xF0) ? 0 : 23 - ((imm)&0xF), \ + ((imm)&0xF0) ? 0 : 24 - ((imm)&0xF), \ + ((imm)&0xF0) ? 0 : 25 - ((imm)&0xF), \ + ((imm)&0xF0) ? 0 : 26 - ((imm)&0xF), \ + ((imm)&0xF0) ? 0 : 27 - ((imm)&0xF), \ + ((imm)&0xF0) ? 0 : 28 - ((imm)&0xF), \ + ((imm)&0xF0) ? 0 : 29 - ((imm)&0xF), \ + ((imm)&0xF0) ? 0 : 30 - ((imm)&0xF), \ + ((imm)&0xF0) ? 0 : 31 - ((imm)&0xF)); }) + +#define _mm_bslli_si128(a, imm) \ + _mm_slli_si128((a), (imm)) static __inline__ __m128i __attribute__((__always_inline__, __nodebug__)) _mm_slli_epi16(__m128i __a, int __count) @@ -891,12 +908,28 @@ _mm_sra_epi32(__m128i __a, __m128i __count) return (__m128i)__builtin_ia32_psrad128((__v4si)__a, (__v4si)__count); } - -#define _mm_srli_si128(a, count) __extension__ ({ \ - _Pragma("clang diagnostic push") _Pragma("clang diagnostic ignored \"-Wshadow\""); \ - __m128i __a = (a); \ - _Pragma("clang diagnostic pop"); \ - (__m128i)__builtin_ia32_psrldqi128(__a, (count)*8); }) +#define _mm_srli_si128(a, imm) __extension__ ({ \ + (__m128i)__builtin_shufflevector((__v16qi)(__m128i)(a), \ + (__v16qi)_mm_setzero_si128(), \ + ((imm)&0xF0) ? 16 : ((imm)&0xF) + 0, \ + ((imm)&0xF0) ? 16 : ((imm)&0xF) + 1, \ + ((imm)&0xF0) ? 16 : ((imm)&0xF) + 2, \ + ((imm)&0xF0) ? 16 : ((imm)&0xF) + 3, \ + ((imm)&0xF0) ? 16 : ((imm)&0xF) + 4, \ + ((imm)&0xF0) ? 16 : ((imm)&0xF) + 5, \ + ((imm)&0xF0) ? 16 : ((imm)&0xF) + 6, \ + ((imm)&0xF0) ? 16 : ((imm)&0xF) + 7, \ + ((imm)&0xF0) ? 16 : ((imm)&0xF) + 8, \ + ((imm)&0xF0) ? 16 : ((imm)&0xF) + 9, \ + ((imm)&0xF0) ? 16 : ((imm)&0xF) + 10, \ + ((imm)&0xF0) ? 16 : ((imm)&0xF) + 11, \ + ((imm)&0xF0) ? 16 : ((imm)&0xF) + 12, \ + ((imm)&0xF0) ? 16 : ((imm)&0xF) + 13, \ + ((imm)&0xF0) ? 16 : ((imm)&0xF) + 14, \ + ((imm)&0xF0) ? 
16 : ((imm)&0xF) + 15); }) + +#define _mm_bsrli_si128(a, imm) \ + _mm_srli_si128((a), (imm)) static __inline__ __m128i __attribute__((__always_inline__, __nodebug__)) _mm_srli_epi16(__m128i __a, int __count) @@ -1070,7 +1103,7 @@ _mm_loadu_si128(__m128i const *__p) { struct __loadu_si128 { __m128i __v; - } __attribute__((packed, may_alias)); + } __attribute__((__packed__, __may_alias__)); return ((struct __loadu_si128*)__p)->__v; } @@ -1284,27 +1317,21 @@ _mm_movemask_epi8(__m128i __a) } #define _mm_shuffle_epi32(a, imm) __extension__ ({ \ - _Pragma("clang diagnostic push") _Pragma("clang diagnostic ignored \"-Wshadow\""); \ - __m128i __a = (a); \ - _Pragma("clang diagnostic pop"); \ - (__m128i)__builtin_shufflevector((__v4si)__a, (__v4si) _mm_set1_epi32(0), \ + (__m128i)__builtin_shufflevector((__v4si)(__m128i)(a), \ + (__v4si)_mm_set1_epi32(0), \ (imm) & 0x3, ((imm) & 0xc) >> 2, \ ((imm) & 0x30) >> 4, ((imm) & 0xc0) >> 6); }) #define _mm_shufflelo_epi16(a, imm) __extension__ ({ \ - _Pragma("clang diagnostic push") _Pragma("clang diagnostic ignored \"-Wshadow\""); \ - __m128i __a = (a); \ - _Pragma("clang diagnostic pop"); \ - (__m128i)__builtin_shufflevector((__v8hi)__a, (__v8hi) _mm_set1_epi16(0), \ + (__m128i)__builtin_shufflevector((__v8hi)(__m128i)(a), \ + (__v8hi)_mm_set1_epi16(0), \ (imm) & 0x3, ((imm) & 0xc) >> 2, \ ((imm) & 0x30) >> 4, ((imm) & 0xc0) >> 6, \ 4, 5, 6, 7); }) #define _mm_shufflehi_epi16(a, imm) __extension__ ({ \ - _Pragma("clang diagnostic push") _Pragma("clang diagnostic ignored \"-Wshadow\""); \ - __m128i __a = (a); \ - _Pragma("clang diagnostic pop"); \ - (__m128i)__builtin_shufflevector((__v8hi)__a, (__v8hi) _mm_set1_epi16(0), \ + (__m128i)__builtin_shufflevector((__v8hi)(__m128i)(a), \ + (__v8hi)_mm_set1_epi16(0), \ 0, 1, 2, 3, \ 4 + (((imm) & 0x03) >> 0), \ 4 + (((imm) & 0x0c) >> 2), \ @@ -1396,11 +1423,8 @@ _mm_movemask_pd(__m128d __a) } #define _mm_shuffle_pd(a, b, i) __extension__ ({ \ - _Pragma("clang diagnostic push") _Pragma("clang diagnostic ignored \"-Wshadow\""); \ - __m128d __a = (a); \ - __m128d __b = (b); \ - _Pragma("clang diagnostic pop"); \ - __builtin_shufflevector(__a, __b, (i) & 1, (((i) & 2) >> 1) + 2); }) + __builtin_shufflevector((__m128d)(a), (__m128d)(b), \ + (i) & 1, (((i) & 2) >> 1) + 2); }) static __inline__ __m128 __attribute__((__always_inline__, __nodebug__)) _mm_castpd_ps(__m128d __a) diff --git a/contrib/llvm/tools/clang/lib/Headers/htmintrin.h b/contrib/llvm/tools/clang/lib/Headers/htmintrin.h new file mode 100644 index 0000000..0088c7c --- /dev/null +++ b/contrib/llvm/tools/clang/lib/Headers/htmintrin.h @@ -0,0 +1,226 @@ +/*===---- htmintrin.h - Standard header for PowerPC HTM ---------------===*\ + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * +\*===----------------------------------------------------------------------===*/ + +#ifndef __HTMINTRIN_H +#define __HTMINTRIN_H + +#ifndef __HTM__ +#error "HTM instruction set not enabled" +#endif + +#ifdef __powerpc__ + +#include <stdint.h> + +typedef uint64_t texasr_t; +typedef uint32_t texasru_t; +typedef uint32_t texasrl_t; +typedef uintptr_t tfiar_t; +typedef uintptr_t tfhar_t; + +#define _HTM_STATE(CR0) ((CR0 >> 1) & 0x3) +#define _HTM_NONTRANSACTIONAL 0x0 +#define _HTM_SUSPENDED 0x1 +#define _HTM_TRANSACTIONAL 0x2 + +#define _TEXASR_EXTRACT_BITS(TEXASR,BITNUM,SIZE) \ + (((TEXASR) >> (63-(BITNUM))) & ((1<<(SIZE))-1)) +#define _TEXASRU_EXTRACT_BITS(TEXASR,BITNUM,SIZE) \ + (((TEXASR) >> (31-(BITNUM))) & ((1<<(SIZE))-1)) + +#define _TEXASR_FAILURE_CODE(TEXASR) \ + _TEXASR_EXTRACT_BITS(TEXASR, 7, 8) +#define _TEXASRU_FAILURE_CODE(TEXASRU) \ + _TEXASRU_EXTRACT_BITS(TEXASRU, 7, 8) + +#define _TEXASR_FAILURE_PERSISTENT(TEXASR) \ + _TEXASR_EXTRACT_BITS(TEXASR, 7, 1) +#define _TEXASRU_FAILURE_PERSISTENT(TEXASRU) \ + _TEXASRU_EXTRACT_BITS(TEXASRU, 7, 1) + +#define _TEXASR_DISALLOWED(TEXASR) \ + _TEXASR_EXTRACT_BITS(TEXASR, 8, 1) +#define _TEXASRU_DISALLOWED(TEXASRU) \ + _TEXASRU_EXTRACT_BITS(TEXASRU, 8, 1) + +#define _TEXASR_NESTING_OVERFLOW(TEXASR) \ + _TEXASR_EXTRACT_BITS(TEXASR, 9, 1) +#define _TEXASRU_NESTING_OVERFLOW(TEXASRU) \ + _TEXASRU_EXTRACT_BITS(TEXASRU, 9, 1) + +#define _TEXASR_FOOTPRINT_OVERFLOW(TEXASR) \ + _TEXASR_EXTRACT_BITS(TEXASR, 10, 1) +#define _TEXASRU_FOOTPRINT_OVERFLOW(TEXASRU) \ + _TEXASRU_EXTRACT_BITS(TEXASRU, 10, 1) + +#define _TEXASR_SELF_INDUCED_CONFLICT(TEXASR) \ + _TEXASR_EXTRACT_BITS(TEXASR, 11, 1) +#define _TEXASRU_SELF_INDUCED_CONFLICT(TEXASRU) \ + _TEXASRU_EXTRACT_BITS(TEXASRU, 11, 1) + +#define _TEXASR_NON_TRANSACTIONAL_CONFLICT(TEXASR) \ + _TEXASR_EXTRACT_BITS(TEXASR, 12, 1) +#define _TEXASRU_NON_TRANSACTIONAL_CONFLICT(TEXASRU) \ + _TEXASRU_EXTRACT_BITS(TEXASRU, 12, 1) + +#define _TEXASR_TRANSACTION_CONFLICT(TEXASR) \ + _TEXASR_EXTRACT_BITS(TEXASR, 13, 1) +#define _TEXASRU_TRANSACTION_CONFLICT(TEXASRU) \ + _TEXASRU_EXTRACT_BITS(TEXASRU, 13, 1) + +#define _TEXASR_TRANSLATION_INVALIDATION_CONFLICT(TEXASR) \ + _TEXASR_EXTRACT_BITS(TEXASR, 14, 1) +#define _TEXASRU_TRANSLATION_INVALIDATION_CONFLICT(TEXASRU) \ + _TEXASRU_EXTRACT_BITS(TEXASRU, 14, 1) + +#define _TEXASR_IMPLEMENTAION_SPECIFIC(TEXASR) \ + _TEXASR_EXTRACT_BITS(TEXASR, 15, 1) +#define _TEXASRU_IMPLEMENTAION_SPECIFIC(TEXASRU) \ + _TEXASRU_EXTRACT_BITS(TEXASRU, 15, 1) + +#define _TEXASR_INSTRUCTION_FETCH_CONFLICT(TEXASR) \ + _TEXASR_EXTRACT_BITS(TEXASR, 16, 1) +#define _TEXASRU_INSTRUCTION_FETCH_CONFLICT(TEXASRU) \ + _TEXASRU_EXTRACT_BITS(TEXASRU, 16, 1) + +#define _TEXASR_ABORT(TEXASR) \ + _TEXASR_EXTRACT_BITS(TEXASR, 31, 1) +#define _TEXASRU_ABORT(TEXASRU) \ + _TEXASRU_EXTRACT_BITS(TEXASRU, 31, 1) + + +#define _TEXASR_SUSPENDED(TEXASR) \ + _TEXASR_EXTRACT_BITS(TEXASR, 32, 1) + +#define _TEXASR_PRIVILEGE(TEXASR) \ + _TEXASR_EXTRACT_BITS(TEXASR, 35, 2) + +#define _TEXASR_FAILURE_SUMMARY(TEXASR) \ + _TEXASR_EXTRACT_BITS(TEXASR, 36, 1) + +#define _TEXASR_TFIAR_EXACT(TEXASR) \ + _TEXASR_EXTRACT_BITS(TEXASR, 37, 1) + +#define _TEXASR_ROT(TEXASR) \ + _TEXASR_EXTRACT_BITS(TEXASR, 38, 1) + +#define _TEXASR_TRANSACTION_LEVEL(TEXASR) \ + 
_TEXASR_EXTRACT_BITS(TEXASR, 63, 12) + +#endif /* __powerpc__ */ + +#ifdef __s390__ + +/* Condition codes generated by tbegin */ +#define _HTM_TBEGIN_STARTED 0 +#define _HTM_TBEGIN_INDETERMINATE 1 +#define _HTM_TBEGIN_TRANSIENT 2 +#define _HTM_TBEGIN_PERSISTENT 3 + +/* The abort codes below this threshold are reserved for machine use. */ +#define _HTM_FIRST_USER_ABORT_CODE 256 + +/* The transaction diagnostic block as it is defined in the Principles + of Operation, chapter 5-91. */ + +struct __htm_tdb { + unsigned char format; /* 0 */ + unsigned char flags; + unsigned char reserved1[4]; + unsigned short nesting_depth; + unsigned long long abort_code; /* 8 */ + unsigned long long conflict_token; /* 16 */ + unsigned long long atia; /* 24 */ + unsigned char eaid; /* 32 */ + unsigned char dxc; + unsigned char reserved2[2]; + unsigned int program_int_id; + unsigned long long exception_id; /* 40 */ + unsigned long long bea; /* 48 */ + unsigned char reserved3[72]; /* 56 */ + unsigned long long gprs[16]; /* 128 */ +} __attribute__((__packed__, __aligned__ (8))); + + +/* Helper intrinsics to retry tbegin in case of transient failure. */ + +static __inline int __attribute__((__always_inline__, __nodebug__)) +__builtin_tbegin_retry_null (int retry) +{ + int cc, i = 0; + + while ((cc = __builtin_tbegin(0)) == _HTM_TBEGIN_TRANSIENT + && i++ < retry) + __builtin_tx_assist(i); + + return cc; +} + +static __inline int __attribute__((__always_inline__, __nodebug__)) +__builtin_tbegin_retry_tdb (void *tdb, int retry) +{ + int cc, i = 0; + + while ((cc = __builtin_tbegin(tdb)) == _HTM_TBEGIN_TRANSIENT + && i++ < retry) + __builtin_tx_assist(i); + + return cc; +} + +#define __builtin_tbegin_retry(tdb, retry) \ + (__builtin_constant_p(tdb == 0) && tdb == 0 ? \ + __builtin_tbegin_retry_null(retry) : \ + __builtin_tbegin_retry_tdb(tdb, retry)) + +static __inline int __attribute__((__always_inline__, __nodebug__)) +__builtin_tbegin_retry_nofloat_null (int retry) +{ + int cc, i = 0; + + while ((cc = __builtin_tbegin_nofloat(0)) == _HTM_TBEGIN_TRANSIENT + && i++ < retry) + __builtin_tx_assist(i); + + return cc; +} + +static __inline int __attribute__((__always_inline__, __nodebug__)) +__builtin_tbegin_retry_nofloat_tdb (void *tdb, int retry) +{ + int cc, i = 0; + + while ((cc = __builtin_tbegin_nofloat(tdb)) == _HTM_TBEGIN_TRANSIENT + && i++ < retry) + __builtin_tx_assist(i); + + return cc; +} + +#define __builtin_tbegin_retry_nofloat(tdb, retry) \ + (__builtin_constant_p(tdb == 0) && tdb == 0 ?
\ + __builtin_tbegin_retry_nofloat_null(retry) : \ + __builtin_tbegin_retry_nofloat_tdb(tdb, retry)) + +#endif /* __s390__ */ + +#endif /* __HTMINTRIN_H */ diff --git a/contrib/llvm/tools/clang/lib/Headers/htmxlintrin.h b/contrib/llvm/tools/clang/lib/Headers/htmxlintrin.h new file mode 100644 index 0000000..30f524d --- /dev/null +++ b/contrib/llvm/tools/clang/lib/Headers/htmxlintrin.h @@ -0,0 +1,363 @@ +/*===---- htmxlintrin.h - XL compiler HTM execution intrinsics-------------===*\ + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * +\*===----------------------------------------------------------------------===*/ + +#ifndef __HTMXLINTRIN_H +#define __HTMXLINTRIN_H + +#ifndef __HTM__ +#error "HTM instruction set not enabled" +#endif + +#include <htmintrin.h> + +#ifdef __powerpc__ + +#ifdef __cplusplus +extern "C" { +#endif + +#define _TEXASR_PTR(TM_BUF) \ + ((texasr_t *)((TM_BUF)+0)) +#define _TEXASRU_PTR(TM_BUF) \ + ((texasru_t *)((TM_BUF)+0)) +#define _TEXASRL_PTR(TM_BUF) \ + ((texasrl_t *)((TM_BUF)+4)) +#define _TFIAR_PTR(TM_BUF) \ + ((tfiar_t *)((TM_BUF)+8)) + +typedef char TM_buff_type[16]; + +/* This macro can be used to determine whether a transaction was successfully + started from the __TM_begin() and __TM_simple_begin() intrinsic functions + below. 
*/ +#define _HTM_TBEGIN_STARTED 1 + +extern __inline long +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +__TM_simple_begin (void) +{ + if (__builtin_expect (__builtin_tbegin (0), 1)) + return _HTM_TBEGIN_STARTED; + return 0; +} + +extern __inline long +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +__TM_begin (void* const TM_buff) +{ + *_TEXASRL_PTR (TM_buff) = 0; + if (__builtin_expect (__builtin_tbegin (0), 1)) + return _HTM_TBEGIN_STARTED; +#ifdef __powerpc64__ + *_TEXASR_PTR (TM_buff) = __builtin_get_texasr (); +#else + *_TEXASRU_PTR (TM_buff) = __builtin_get_texasru (); + *_TEXASRL_PTR (TM_buff) = __builtin_get_texasr (); +#endif + *_TFIAR_PTR (TM_buff) = __builtin_get_tfiar (); + return 0; +} + +extern __inline long +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +__TM_end (void) +{ + if (__builtin_expect (__builtin_tend (0), 1)) + return 1; + return 0; +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +__TM_abort (void) +{ + __builtin_tabort (0); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +__TM_named_abort (unsigned char const code) +{ + __builtin_tabort (code); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +__TM_resume (void) +{ + __builtin_tresume (); +} + +extern __inline void +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +__TM_suspend (void) +{ + __builtin_tsuspend (); +} + +extern __inline long +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +__TM_is_user_abort (void* const TM_buff) +{ + texasru_t texasru = *_TEXASRU_PTR (TM_buff); + return _TEXASRU_ABORT (texasru); +} + +extern __inline long +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +__TM_is_named_user_abort (void* const TM_buff, unsigned char *code) +{ + texasru_t texasru = *_TEXASRU_PTR (TM_buff); + + *code = _TEXASRU_FAILURE_CODE (texasru); + return _TEXASRU_ABORT (texasru); +} + +extern __inline long +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +__TM_is_illegal (void* const TM_buff) +{ + texasru_t texasru = *_TEXASRU_PTR (TM_buff); + return _TEXASRU_DISALLOWED (texasru); +} + +extern __inline long +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +__TM_is_footprint_exceeded (void* const TM_buff) +{ + texasru_t texasru = *_TEXASRU_PTR (TM_buff); + return _TEXASRU_FOOTPRINT_OVERFLOW (texasru); +} + +extern __inline long +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +__TM_nesting_depth (void* const TM_buff) +{ + texasrl_t texasrl; + + if (_HTM_STATE (__builtin_ttest ()) == _HTM_NONTRANSACTIONAL) + { + texasrl = *_TEXASRL_PTR (TM_buff); + if (!_TEXASR_FAILURE_SUMMARY (texasrl)) + texasrl = 0; + } + else + texasrl = (texasrl_t) __builtin_get_texasr (); + + return _TEXASR_TRANSACTION_LEVEL (texasrl); +} + +extern __inline long +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +__TM_is_nested_too_deep(void* const TM_buff) +{ + texasru_t texasru = *_TEXASRU_PTR (TM_buff); + return _TEXASRU_NESTING_OVERFLOW (texasru); +} + +extern __inline long +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +__TM_is_conflict(void* const TM_buff) +{ + texasru_t texasru = *_TEXASRU_PTR (TM_buff); + /* Return TEXASR bits 11 (Self-Induced Conflict) through + 14 (Translation Invalidation Conflict). */ + return (_TEXASRU_EXTRACT_BITS (texasru, 14, 4)) ? 
1 : 0; +} + +extern __inline long +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +__TM_is_failure_persistent(void* const TM_buff) +{ + texasru_t texasru = *_TEXASRU_PTR (TM_buff); + return _TEXASRU_FAILURE_PERSISTENT (texasru); +} + +extern __inline long +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +__TM_failure_address(void* const TM_buff) +{ + return *_TFIAR_PTR (TM_buff); +} + +extern __inline long long +__attribute__ ((__gnu_inline__, __always_inline__, __artificial__)) +__TM_failure_code(void* const TM_buff) +{ + return *_TEXASR_PTR (TM_buff); +} + +#ifdef __cplusplus +} +#endif + +#endif /* __powerpc__ */ + +#ifdef __s390__ + +#include <stdint.h> + +/* These intrinsics are being made available for compatibility with + the IBM XL compiler. For documentation please see the "z/OS XL + C/C++ Programming Guide" publicly available on the web. */ + +static __inline long __attribute__((__always_inline__, __nodebug__)) +__TM_simple_begin () +{ + return __builtin_tbegin_nofloat (0); +} + +static __inline long __attribute__((__always_inline__, __nodebug__)) +__TM_begin (void* const tdb) +{ + return __builtin_tbegin_nofloat (tdb); +} + +static __inline long __attribute__((__always_inline__, __nodebug__)) +__TM_end () +{ + return __builtin_tend (); +} + +static __inline void __attribute__((__always_inline__)) +__TM_abort () +{ + return __builtin_tabort (_HTM_FIRST_USER_ABORT_CODE); +} + +static __inline void __attribute__((__always_inline__, __nodebug__)) +__TM_named_abort (unsigned char const code) +{ + return __builtin_tabort ((int)_HTM_FIRST_USER_ABORT_CODE + code); +} + +static __inline void __attribute__((__always_inline__, __nodebug__)) +__TM_non_transactional_store (void* const addr, long long const value) +{ + __builtin_non_tx_store ((uint64_t*)addr, (uint64_t)value); +} + +static __inline long __attribute__((__always_inline__, __nodebug__)) +__TM_nesting_depth (void* const tdb_ptr) +{ + int depth = __builtin_tx_nesting_depth (); + struct __htm_tdb *tdb = (struct __htm_tdb*)tdb_ptr; + + if (depth != 0) + return depth; + + if (tdb->format != 1) + return 0; + return tdb->nesting_depth; +} + +/* Transaction failure diagnostics */ + +static __inline long __attribute__((__always_inline__, __nodebug__)) +__TM_is_user_abort (void* const tdb_ptr) +{ + struct __htm_tdb *tdb = (struct __htm_tdb*)tdb_ptr; + + if (tdb->format != 1) + return 0; + + return !!(tdb->abort_code >= _HTM_FIRST_USER_ABORT_CODE); +} + +static __inline long __attribute__((__always_inline__, __nodebug__)) +__TM_is_named_user_abort (void* const tdb_ptr, unsigned char* code) +{ + struct __htm_tdb *tdb = (struct __htm_tdb*)tdb_ptr; + + if (tdb->format != 1) + return 0; + + if (tdb->abort_code >= _HTM_FIRST_USER_ABORT_CODE) + { + *code = tdb->abort_code - _HTM_FIRST_USER_ABORT_CODE; + return 1; + } + return 0; +} + +static __inline long __attribute__((__always_inline__, __nodebug__)) +__TM_is_illegal (void* const tdb_ptr) +{ + struct __htm_tdb *tdb = (struct __htm_tdb*)tdb_ptr; + + return (tdb->format == 1 + && (tdb->abort_code == 4 /* unfiltered program interruption */ + || tdb->abort_code == 11 /* restricted instruction */)); +} + +static __inline long __attribute__((__always_inline__, __nodebug__)) +__TM_is_footprint_exceeded (void* const tdb_ptr) +{ + struct __htm_tdb *tdb = (struct __htm_tdb*)tdb_ptr; + + return (tdb->format == 1 + && (tdb->abort_code == 7 /* fetch overflow */ + || tdb->abort_code == 8 /* store overflow */)); +} + +static __inline long
__attribute__((__always_inline__, __nodebug__)) +__TM_is_nested_too_deep (void* const tdb_ptr) +{ + struct __htm_tdb *tdb = (struct __htm_tdb*)tdb_ptr; + + return tdb->format == 1 && tdb->abort_code == 13; /* depth exceeded */ +} + +static __inline long __attribute__((__always_inline__, __nodebug__)) +__TM_is_conflict (void* const tdb_ptr) +{ + struct __htm_tdb *tdb = (struct __htm_tdb*)tdb_ptr; + + return (tdb->format == 1 + && (tdb->abort_code == 9 /* fetch conflict */ + || tdb->abort_code == 10 /* store conflict */)); +} + +static __inline long __attribute__((__always_inline__, __nodebug__)) +__TM_is_failure_persistent (long const result) +{ + return result == _HTM_TBEGIN_PERSISTENT; +} + +static __inline long __attribute__((__always_inline__, __nodebug__)) +__TM_failure_address (void* const tdb_ptr) +{ + struct __htm_tdb *tdb = (struct __htm_tdb*)tdb_ptr; + return tdb->atia; +} + +static __inline long __attribute__((__always_inline__, __nodebug__)) +__TM_failure_code (void* const tdb_ptr) +{ + struct __htm_tdb *tdb = (struct __htm_tdb*)tdb_ptr; + + return tdb->abort_code; +} + +#endif /* __s390__ */ + +#endif /* __HTMXLINTRIN_H */ diff --git a/contrib/llvm/tools/clang/lib/Headers/immintrin.h b/contrib/llvm/tools/clang/lib/Headers/immintrin.h index 2400fea..ac7d54a 100644 --- a/contrib/llvm/tools/clang/lib/Headers/immintrin.h +++ b/contrib/llvm/tools/clang/lib/Headers/immintrin.h @@ -88,10 +88,18 @@ #include <avx512bwintrin.h> #endif +#ifdef __AVX512DQ__ +#include <avx512dqintrin.h> +#endif + #if defined (__AVX512VL__) && defined (__AVX512BW__) #include <avx512vlbwintrin.h> #endif +#if defined (__AVX512VL__) && defined (__AVX512DQ__) +#include <avx512vldqintrin.h> +#endif + #ifdef __AVX512ER__ #include <avx512erintrin.h> #endif diff --git a/contrib/llvm/tools/clang/lib/Headers/module.modulemap b/contrib/llvm/tools/clang/lib/Headers/module.modulemap index 062464e..ac5876f 100644 --- a/contrib/llvm/tools/clang/lib/Headers/module.modulemap +++ b/contrib/llvm/tools/clang/lib/Headers/module.modulemap @@ -49,7 +49,7 @@ module _Builtin_intrinsics [system] [extern_c] { explicit module sse { requires sse export mmx - export * // note: for hackish <emmintrin.h> dependency + export sse2 // note: for hackish <emmintrin.h> dependency header "xmmintrin.h" } @@ -169,6 +169,19 @@ module _Builtin_intrinsics [system] [extern_c] { header "__wmmintrin_pclmul.h" } } + + explicit module systemz { + requires systemz + export * + + header "s390intrin.h" + + explicit module htm { + requires htm + header "htmintrin.h" + header "htmxlintrin.h" + } + } } module _Builtin_stddef_max_align_t [system] [extern_c] { diff --git a/contrib/llvm/tools/clang/lib/Headers/s390intrin.h b/contrib/llvm/tools/clang/lib/Headers/s390intrin.h new file mode 100644 index 0000000..b209895 --- /dev/null +++ b/contrib/llvm/tools/clang/lib/Headers/s390intrin.h @@ -0,0 +1,35 @@ +/*===---- s390intrin.h - SystemZ intrinsics --------------------------------=== + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the 
Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __S390INTRIN_H +#define __S390INTRIN_H + +#ifndef __s390__ +#error "<s390intrin.h> is for s390 only" +#endif + +#ifdef __HTM__ +#include <htmintrin.h> +#endif + +#endif /* __S390INTRIN_H*/ diff --git a/contrib/llvm/tools/clang/lib/Headers/stdatomic.h b/contrib/llvm/tools/clang/lib/Headers/stdatomic.h index e3c3476..e037987 100644 --- a/contrib/llvm/tools/clang/lib/Headers/stdatomic.h +++ b/contrib/llvm/tools/clang/lib/Headers/stdatomic.h @@ -71,7 +71,7 @@ typedef enum memory_order { /* 7.17.4 Fences */ -// These should be provided by the libc implementation. +/* These should be provided by the libc implementation. */ void atomic_thread_fence(memory_order); void atomic_signal_fence(memory_order); @@ -164,7 +164,7 @@ typedef struct atomic_flag { atomic_bool _Value; } atomic_flag; #define ATOMIC_FLAG_INIT { 0 } -// These should be provided by the libc implementation. +/* These should be provided by the libc implementation. */ #ifdef __cplusplus bool atomic_flag_test_and_set(volatile atomic_flag *); bool atomic_flag_test_and_set_explicit(volatile atomic_flag *, memory_order); diff --git a/contrib/llvm/tools/clang/lib/Headers/unwind.h b/contrib/llvm/tools/clang/lib/Headers/unwind.h index 90aca16..303d792 100644 --- a/contrib/llvm/tools/clang/lib/Headers/unwind.h +++ b/contrib/llvm/tools/clang/lib/Headers/unwind.h @@ -235,9 +235,9 @@ void *_Unwind_FindEnclosingFunction(void *); #ifdef __APPLE__ _Unwind_Ptr _Unwind_GetDataRelBase(struct _Unwind_Context *) - __attribute__((unavailable)); + __attribute__((__unavailable__)); _Unwind_Ptr _Unwind_GetTextRelBase(struct _Unwind_Context *) - __attribute__((unavailable)); + __attribute__((__unavailable__)); /* Darwin-specific functions */ void __register_frame(const void *); @@ -251,15 +251,15 @@ struct dwarf_eh_bases { void *_Unwind_Find_FDE(const void *, struct dwarf_eh_bases *); void __register_frame_info_bases(const void *, void *, void *, void *) - __attribute__((unavailable)); -void __register_frame_info(const void *, void *) __attribute__((unavailable)); + __attribute__((__unavailable__)); +void __register_frame_info(const void *, void *) __attribute__((__unavailable__)); void __register_frame_info_table_bases(const void *, void*, void *, void *) - __attribute__((unavailable)); + __attribute__((__unavailable__)); void __register_frame_info_table(const void *, void *) - __attribute__((unavailable)); -void __register_frame_table(const void *) __attribute__((unavailable)); -void __deregister_frame_info(const void *) __attribute__((unavailable)); -void __deregister_frame_info_bases(const void *)__attribute__((unavailable)); + __attribute__((__unavailable__)); +void __register_frame_table(const void *) __attribute__((__unavailable__)); +void __deregister_frame_info(const void *) __attribute__((__unavailable__)); +void __deregister_frame_info_bases(const void *)__attribute__((__unavailable__)); #else diff --git a/contrib/llvm/tools/clang/lib/Headers/xmmintrin.h 
b/contrib/llvm/tools/clang/lib/Headers/xmmintrin.h index d1afe81..3a6b95e 100644 --- a/contrib/llvm/tools/clang/lib/Headers/xmmintrin.h +++ b/contrib/llvm/tools/clang/lib/Headers/xmmintrin.h @@ -994,7 +994,7 @@ do { \ #define _m_ _mm_ /* Ugly hack for backwards-compatibility (compatible with gcc) */ -#ifdef __SSE2__ +#if defined(__SSE2__) && !__has_feature(modules) #include <emmintrin.h> #endif diff --git a/contrib/llvm/tools/clang/lib/Index/SimpleFormatContext.h b/contrib/llvm/tools/clang/lib/Index/SimpleFormatContext.h index 080a4ad..b884214 100644 --- a/contrib/llvm/tools/clang/lib/Index/SimpleFormatContext.h +++ b/contrib/llvm/tools/clang/lib/Index/SimpleFormatContext.h @@ -44,8 +44,6 @@ public: Diagnostics->setClient(new IgnoringDiagConsumer, true); } - ~SimpleFormatContext() { } - FileID createInMemoryFile(StringRef Name, StringRef Content) { std::unique_ptr<llvm::MemoryBuffer> Source = llvm::MemoryBuffer::getMemBuffer(Content); diff --git a/contrib/llvm/tools/clang/lib/Index/USRGeneration.cpp b/contrib/llvm/tools/clang/lib/Index/USRGeneration.cpp index baa166e..8cdd283 100644 --- a/contrib/llvm/tools/clang/lib/Index/USRGeneration.cpp +++ b/contrib/llvm/tools/clang/lib/Index/USRGeneration.cpp @@ -847,7 +847,7 @@ bool clang::index::generateUSRForDecl(const Decl *D, return UG.ignoreResults(); } -bool clang::index::generateUSRForMacro(const MacroDefinition *MD, +bool clang::index::generateUSRForMacro(const MacroDefinitionRecord *MD, const SourceManager &SM, SmallVectorImpl<char> &Buf) { // Don't generate USRs for things with invalid locations. diff --git a/contrib/llvm/tools/clang/lib/Lex/HeaderSearch.cpp b/contrib/llvm/tools/clang/lib/Lex/HeaderSearch.cpp index d6b255f..ad7d344 100644 --- a/contrib/llvm/tools/clang/lib/Lex/HeaderSearch.cpp +++ b/contrib/llvm/tools/clang/lib/Lex/HeaderSearch.cpp @@ -18,6 +18,7 @@ #include "clang/Lex/HeaderSearchOptions.h" #include "clang/Lex/LexDiagnostic.h" #include "clang/Lex/Lexer.h" +#include "clang/Lex/Preprocessor.h" #include "llvm/ADT/APInt.h" #include "llvm/ADT/Hashing.h" #include "llvm/ADT/SmallString.h" @@ -149,7 +150,7 @@ std::string HeaderSearch::getModuleFileName(StringRef ModuleName, SmallString<128> HashStr; llvm::APInt(64, size_t(Hash)).toStringUnsigned(HashStr, /*Radix*/36); - llvm::sys::path::append(Result, ModuleName + "-" + HashStr.str() + ".pcm"); + llvm::sys::path::append(Result, ModuleName + "-" + HashStr + ".pcm"); } return Result.str().str(); } @@ -297,7 +298,7 @@ const FileEntry *DirectoryLookup::LookupFile( RelativePath->append(Filename.begin(), Filename.end()); } - return getFileAndSuggestModule(HS, TmpDir.str(), getDir(), + return getFileAndSuggestModule(HS, TmpDir, getDir(), isSystemHeaderDirectory(), SuggestedModule); } @@ -438,7 +439,7 @@ const FileEntry *DirectoryLookup::DoFrameworkLookup( HS.IncrementFrameworkLookupCount(); // If the framework dir doesn't exist, we fail. 
- const DirectoryEntry *Dir = FileMgr.getDirectory(FrameworkName.str()); + const DirectoryEntry *Dir = FileMgr.getDirectory(FrameworkName); if (!Dir) return nullptr; // Otherwise, if it does, remember that this is the right direntry for this @@ -450,7 +451,7 @@ const FileEntry *DirectoryLookup::DoFrameworkLookup( if (getDirCharacteristic() == SrcMgr::C_User) { SmallString<1024> SystemFrameworkMarker(FrameworkName); SystemFrameworkMarker += ".system_framework"; - if (llvm::sys::fs::exists(SystemFrameworkMarker.str())) { + if (llvm::sys::fs::exists(SystemFrameworkMarker)) { CacheEntry.IsUserSpecifiedSystemFramework = true; } } @@ -476,7 +477,7 @@ const FileEntry *DirectoryLookup::DoFrameworkLookup( } FrameworkName.append(Filename.begin()+SlashPos+1, Filename.end()); - const FileEntry *FE = FileMgr.getFile(FrameworkName.str(), + const FileEntry *FE = FileMgr.getFile(FrameworkName, /*openFile=*/!SuggestedModule); if (!FE) { // Check "/System/Library/Frameworks/Cocoa.framework/PrivateHeaders/file.h" @@ -487,7 +488,7 @@ const FileEntry *DirectoryLookup::DoFrameworkLookup( SearchPath->insert(SearchPath->begin()+OrigSize, Private, Private+strlen(Private)); - FE = FileMgr.getFile(FrameworkName.str(), /*openFile=*/!SuggestedModule); + FE = FileMgr.getFile(FrameworkName, /*openFile=*/!SuggestedModule); } // If we found the header and are allowed to suggest a module, do so now. @@ -594,7 +595,13 @@ const FileEntry *HeaderSearch::LookupFile( RelativePath->append(Filename.begin(), Filename.end()); } // Otherwise, just return the file. - return FileMgr.getFile(Filename, /*openFile=*/true); + const FileEntry *File = FileMgr.getFile(Filename, /*openFile=*/true); + if (File && SuggestedModule) { + // If there is a module that corresponds to this header, suggest it. + hasModuleMap(Filename, File->getDir(), /*SystemHeaderDir*/false); + *SuggestedModule = findModuleForHeader(File); + } + return File; } // This is the header that MSVC's header search would have found. @@ -628,7 +635,7 @@ const FileEntry *HeaderSearch::LookupFile( bool IncluderIsSystemHeader = Includer && getFileInfo(Includer).DirInfo != SrcMgr::C_User; if (const FileEntry *FE = getFileAndSuggestModule( - *this, TmpDir.str(), IncluderAndDir.second, + *this, TmpDir, IncluderAndDir.second, IncluderIsSystemHeader, SuggestedModule)) { if (!Includer) { assert(First && "only first includer can have no file"); @@ -865,7 +872,7 @@ LookupSubframeworkHeader(StringRef Filename, ++NumSubFrameworkLookups; // If the framework dir doesn't exist, we fail. 
- const DirectoryEntry *Dir = FileMgr.getDirectory(FrameworkName.str()); + const DirectoryEntry *Dir = FileMgr.getDirectory(FrameworkName); if (!Dir) return nullptr; // Otherwise, if it does, remember that this is the right direntry for this @@ -890,7 +897,7 @@ LookupSubframeworkHeader(StringRef Filename, } HeadersFilename.append(Filename.begin()+SlashPos+1, Filename.end()); - if (!(FE = FileMgr.getFile(HeadersFilename.str(), /*openFile=*/true))) { + if (!(FE = FileMgr.getFile(HeadersFilename, /*openFile=*/true))) { // Check ".../Frameworks/HIToolbox.framework/PrivateHeaders/HIToolbox.h" HeadersFilename = FrameworkName; @@ -902,7 +909,7 @@ LookupSubframeworkHeader(StringRef Filename, } HeadersFilename.append(Filename.begin()+SlashPos+1, Filename.end()); - if (!(FE = FileMgr.getFile(HeadersFilename.str(), /*openFile=*/true))) + if (!(FE = FileMgr.getFile(HeadersFilename, /*openFile=*/true))) return nullptr; } @@ -1016,7 +1023,9 @@ void HeaderSearch::MarkFileModuleHeader(const FileEntry *FE, HFI.setHeaderRole(Role); } -bool HeaderSearch::ShouldEnterIncludeFile(const FileEntry *File, bool isImport){ +bool HeaderSearch::ShouldEnterIncludeFile(Preprocessor &PP, + const FileEntry *File, + bool isImport) { ++NumIncluded; // Count # of attempted #includes. // Get information about this file. @@ -1041,7 +1050,7 @@ bool HeaderSearch::ShouldEnterIncludeFile(const FileEntry *File, bool isImport){ // if the macro that guards it is defined, we know the #include has no effect. if (const IdentifierInfo *ControllingMacro = FileInfo.getControllingMacro(ExternalLookup)) - if (ControllingMacro->hasMacroDefinition()) { + if (PP.isMacroDefined(ControllingMacro)) { ++NumMultiIncludeFileOptzn; return false; } @@ -1067,7 +1076,7 @@ StringRef HeaderSearch::getUniqueFrameworkName(StringRef Framework) { bool HeaderSearch::hasModuleMap(StringRef FileName, const DirectoryEntry *Root, bool IsSystem) { - if (!enabledModules() || !LangOpts.ModulesImplicitMaps) + if (!HSOpts->ModuleMaps || !LangOpts.ModulesImplicitMaps) return false; SmallVector<const DirectoryEntry *, 2> FixUpDirectories; @@ -1284,7 +1293,7 @@ void HeaderSearch::collectAllModules(SmallVectorImpl<Module *> &Modules) { DirNative); // Search each of the ".framework" directories to load them as modules. 
- for (llvm::sys::fs::directory_iterator Dir(DirNative.str(), EC), DirEnd; + for (llvm::sys::fs::directory_iterator Dir(DirNative, EC), DirEnd; Dir != DirEnd && !EC; Dir.increment(EC)) { if (llvm::sys::path::extension(Dir->path()) != ".framework") continue; @@ -1351,10 +1360,12 @@ void HeaderSearch::loadSubdirectoryModuleMaps(DirectoryLookup &SearchDir) { std::error_code EC; SmallString<128> DirNative; llvm::sys::path::native(SearchDir.getDir()->getName(), DirNative); - for (llvm::sys::fs::directory_iterator Dir(DirNative.str(), EC), DirEnd; + for (llvm::sys::fs::directory_iterator Dir(DirNative, EC), DirEnd; Dir != DirEnd && !EC; Dir.increment(EC)) { - loadModuleMapFile(Dir->path(), SearchDir.isSystemHeaderDirectory(), - SearchDir.isFramework()); + bool IsFramework = llvm::sys::path::extension(Dir->path()) == ".framework"; + if (IsFramework == SearchDir.isFramework()) + loadModuleMapFile(Dir->path(), SearchDir.isSystemHeaderDirectory(), + SearchDir.isFramework()); } SearchDir.setSearchedAllModuleMaps(true); diff --git a/contrib/llvm/tools/clang/lib/Lex/Lexer.cpp b/contrib/llvm/tools/clang/lib/Lex/Lexer.cpp index ca5252e..3f89ea6 100644 --- a/contrib/llvm/tools/clang/lib/Lex/Lexer.cpp +++ b/contrib/llvm/tools/clang/lib/Lex/Lexer.cpp @@ -143,14 +143,8 @@ Lexer::Lexer(SourceLocation fileloc, const LangOptions &langOpts, /// range will outlive it, so it doesn't take ownership of it. Lexer::Lexer(FileID FID, const llvm::MemoryBuffer *FromFile, const SourceManager &SM, const LangOptions &langOpts) - : FileLoc(SM.getLocForStartOfFile(FID)), LangOpts(langOpts) { - - InitLexer(FromFile->getBufferStart(), FromFile->getBufferStart(), - FromFile->getBufferEnd()); - - // We *are* in raw mode. - LexingRawMode = true; -} + : Lexer(SM.getLocForStartOfFile(FID), langOpts, FromFile->getBufferStart(), + FromFile->getBufferStart(), FromFile->getBufferEnd()) {} /// Create_PragmaLexer: Lexer constructor - Create a new lexer object for /// _Pragma expansion. This has a variety of magic semantics that this method @@ -1860,7 +1854,7 @@ bool Lexer::LexAngledStringLiteral(Token &Result, const char *CurPtr) { char C = getAndAdvanceChar(CurPtr, Result); while (C != '>') { // Skip escaped characters. - if (C == '\\') { + if (C == '\\' && CurPtr < BufferEnd) { // Skip the escaped character. getAndAdvanceChar(CurPtr, Result); } else if (C == '\n' || C == '\r' || // Newline. diff --git a/contrib/llvm/tools/clang/lib/Lex/LiteralSupport.cpp b/contrib/llvm/tools/clang/lib/Lex/LiteralSupport.cpp index 03331fb..aed9164 100644 --- a/contrib/llvm/tools/clang/lib/Lex/LiteralSupport.cpp +++ b/contrib/llvm/tools/clang/lib/Lex/LiteralSupport.cpp @@ -144,7 +144,8 @@ static unsigned ProcessCharEscape(const char *ThisTokBegin, int CharVal = llvm::hexDigitValue(ThisTokBuf[0]); if (CharVal == -1) break; // About to shift out a digit? - Overflow |= (ResultChar & 0xF0000000) ? true : false; + if (ResultChar & 0xF0000000) + Overflow = true; ResultChar <<= 4; ResultChar |= CharVal; } @@ -596,7 +597,8 @@ NumericLiteralParser::NumericLiteralParser(StringRef TokSpelling, if (isFloat) break; // LF invalid. // Check for long long. The L's need to be adjacent and the same case. - if (s+1 != ThisTokEnd && s[1] == s[0]) { + if (s[1] == s[0]) { + assert(s + 1 < ThisTokEnd && "didn't maximally munch?"); if (isFPConstant) break; // long long invalid for floats. isLongLong = true; ++s; // Eat both of them. 
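/* [Editor's note -- illustrative sketch, not part of the original
 * commit. The bounds checks dropped in the surrounding LiteralSupport
 * hunks rely on two lexer invariants: the token spelling is
 * NUL-terminated, and pp-numbers are maximally munched, so when s
 * points at a suffix character inside the token, s[1] is at worst the
 * terminating NUL and can never compare equal to a valid suffix
 * character. For reference, literals the rewritten suffix parser
 * classifies (the i8/i16/i32/i64/i128 suffixes assume clang with
 * -fms-extensions): */
unsigned long long a = 123ull; /* isUnsigned && isLongLong           */
long long b = 0x1234i64;       /* MicrosoftInteger == 64, radix == 16 */
short c = 10i16;               /* MicrosoftInteger == 16              */
int d = 0b101010;              /* radix == 2; diagnosed as a C++1y /
                                  GCC binary-literal extension        */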
@@ -610,54 +612,45 @@ NumericLiteralParser::NumericLiteralParser(StringRef TokSpelling, if (isLong || isLongLong || MicrosoftInteger) break; - // Allow i8, i16, i32, i64, and i128. - if (s + 1 != ThisTokEnd) { + if (!isFPConstant) { + // Allow i8, i16, i32, i64, and i128. switch (s[1]) { - case '8': - if (isFPConstant) break; - s += 2; // i8 suffix - MicrosoftInteger = 8; - break; - case '1': - if (isFPConstant) break; - if (s + 2 == ThisTokEnd) break; - if (s[2] == '6') { - s += 3; // i16 suffix - MicrosoftInteger = 16; - } - else if (s[2] == '2') { - if (s + 3 == ThisTokEnd) break; - if (s[3] == '8') { - s += 4; // i128 suffix - MicrosoftInteger = 128; - } - } - break; - case '3': - if (isFPConstant) break; - if (s + 2 == ThisTokEnd) break; - if (s[2] == '2') { - s += 3; // i32 suffix - MicrosoftInteger = 32; - } - break; - case '6': - if (isFPConstant) break; - if (s + 2 == ThisTokEnd) break; - if (s[2] == '4') { - s += 3; // i64 suffix - MicrosoftInteger = 64; - } - break; - default: - break; - } - if (MicrosoftInteger) + case '8': + s += 2; // i8 suffix + MicrosoftInteger = 8; + break; + case '1': + if (s[2] == '6') { + s += 3; // i16 suffix + MicrosoftInteger = 16; + } else if (s[2] == '2' && s[3] == '8') { + s += 4; // i128 suffix + MicrosoftInteger = 128; + } + break; + case '3': + if (s[2] == '2') { + s += 3; // i32 suffix + MicrosoftInteger = 32; + } + break; + case '6': + if (s[2] == '4') { + s += 3; // i64 suffix + MicrosoftInteger = 64; + } break; + default: + break; + } + } + if (MicrosoftInteger) { + assert(s <= ThisTokEnd && "didn't maximally munch?"); + break; } } // "i", "if", and "il" are user-defined suffixes in C++1y. - if (PP.getLangOpts().CPlusPlus14 && *s == 'i') + if (*s == 'i' && PP.getLangOpts().CPlusPlus14) break; // fall through. case 'j': @@ -755,11 +748,11 @@ void NumericLiteralParser::ParseNumberStartingWithZero(SourceLocation TokLoc) { s++; int c1 = s[0]; - int c2 = s[1]; // Handle a hex number like 0x1234. - if ((c1 == 'x' || c1 == 'X') && (isHexDigit(c2) || c2 == '.')) { + if ((c1 == 'x' || c1 == 'X') && (isHexDigit(s[1]) || s[1] == '.')) { s++; + assert(s < ThisTokEnd && "didn't maximally munch?"); radix = 16; DigitsBegin = s; s = SkipHexDigits(s); @@ -811,7 +804,7 @@ void NumericLiteralParser::ParseNumberStartingWithZero(SourceLocation TokLoc) { } // Handle simple binary numbers 0b01010 - if ((c1 == 'b' || c1 == 'B') && (c2 == '0' || c2 == '1')) { + if ((c1 == 'b' || c1 == 'B') && (s[1] == '0' || s[1] == '1')) { // 0b101010 is a C++1y / GCC extension. PP.Diag(TokLoc, PP.getLangOpts().CPlusPlus14 @@ -820,6 +813,7 @@ void NumericLiteralParser::ParseNumberStartingWithZero(SourceLocation TokLoc) { ? diag::ext_binary_literal_cxx14 : diag::ext_binary_literal); ++s; + assert(s < ThisTokEnd && "didn't maximally munch?"); radix = 2; DigitsBegin = s; s = SkipBinaryDigits(s); diff --git a/contrib/llvm/tools/clang/lib/Lex/MacroArgs.cpp b/contrib/llvm/tools/clang/lib/Lex/MacroArgs.cpp index 9967f3f..1c1979d 100644 --- a/contrib/llvm/tools/clang/lib/Lex/MacroArgs.cpp +++ b/contrib/llvm/tools/clang/lib/Lex/MacroArgs.cpp @@ -133,12 +133,11 @@ bool MacroArgs::ArgNeedsPreexpansion(const Token *ArgTok, // If there are no identifiers in the argument list, or if the identifiers are // known to not be macros, pre-expansion won't modify it. 
for (; ArgTok->isNot(tok::eof); ++ArgTok) - if (IdentifierInfo *II = ArgTok->getIdentifierInfo()) { - if (II->hasMacroDefinition() && PP.getMacroInfo(II)->isEnabled()) + if (IdentifierInfo *II = ArgTok->getIdentifierInfo()) + if (II->hasMacroDefinition()) // Return true even though the macro could be a function-like macro - // without a following '(' token. + // without a following '(' token, or could be disabled, or not visible. return true; - } return false; } diff --git a/contrib/llvm/tools/clang/lib/Lex/MacroInfo.cpp b/contrib/llvm/tools/clang/lib/Lex/MacroInfo.cpp index 5416886..109b6c1 100644 --- a/contrib/llvm/tools/clang/lib/Lex/MacroInfo.cpp +++ b/contrib/llvm/tools/clang/lib/Lex/MacroInfo.cpp @@ -218,13 +218,9 @@ void MacroDirective::dump() const { if (auto *Prev = getPrevious()) Out << " prev " << Prev; if (IsFromPCH) Out << " from_pch"; - if (IsImported) Out << " imported"; - if (IsAmbiguous) Out << " ambiguous"; - if (IsPublic) - Out << " public"; - else if (isa<VisibilityMacroDirective>(this)) - Out << " private"; + if (isa<VisibilityMacroDirective>(this)) + Out << (IsPublic ? " public" : " private"); if (auto *DMD = dyn_cast<DefMacroDirective>(this)) { if (auto *Info = DMD->getInfo()) { @@ -234,3 +230,12 @@ void MacroDirective::dump() const { } Out << "\n"; } + +ModuleMacro *ModuleMacro::create(Preprocessor &PP, Module *OwningModule, + IdentifierInfo *II, MacroInfo *Macro, + ArrayRef<ModuleMacro *> Overrides) { + void *Mem = PP.getPreprocessorAllocator().Allocate( + sizeof(ModuleMacro) + sizeof(ModuleMacro *) * Overrides.size(), + llvm::alignOf<ModuleMacro>()); + return new (Mem) ModuleMacro(OwningModule, II, Macro, Overrides); +} diff --git a/contrib/llvm/tools/clang/lib/Lex/ModuleMap.cpp b/contrib/llvm/tools/clang/lib/Lex/ModuleMap.cpp index ef322d8..4129183 100644 --- a/contrib/llvm/tools/clang/lib/Lex/ModuleMap.cpp +++ b/contrib/llvm/tools/clang/lib/Lex/ModuleMap.cpp @@ -89,7 +89,9 @@ ModuleMap::ModuleMap(SourceManager &SourceMgr, DiagnosticsEngine &Diags, HeaderSearch &HeaderInfo) : SourceMgr(SourceMgr), Diags(Diags), LangOpts(LangOpts), Target(Target), HeaderInfo(HeaderInfo), BuiltinIncludeDir(nullptr), - CompilingModule(nullptr), SourceModule(nullptr) {} + CompilingModule(nullptr), SourceModule(nullptr), NumCreatedModules(0) { + MMapLangOpts.LineComment = true; +} ModuleMap::~ModuleMap() { for (llvm::StringMap<Module *>::iterator I = Modules.begin(), @@ -203,35 +205,32 @@ ModuleMap::findHeaderInUmbrellaDirs(const FileEntry *File, return KnownHeader(); } -// Returns true if RequestingModule directly uses RequestedModule. -static bool directlyUses(const Module *RequestingModule, - const Module *RequestedModule) { - return std::find(RequestingModule->DirectUses.begin(), - RequestingModule->DirectUses.end(), - RequestedModule) != RequestingModule->DirectUses.end(); -} - static bool violatesPrivateInclude(Module *RequestingModule, const FileEntry *IncFileEnt, ModuleMap::ModuleHeaderRole Role, Module *RequestedModule) { bool IsPrivateRole = Role & ModuleMap::PrivateHeader; #ifndef NDEBUG - // Check for consistency between the module header role - // as obtained from the lookup and as obtained from the module. - // This check is not cheap, so enable it only for debugging. 
- bool IsPrivate = false; - SmallVectorImpl<Module::Header> *HeaderList[] = - {&RequestedModule->Headers[Module::HK_Private], - &RequestedModule->Headers[Module::HK_PrivateTextual]}; - for (auto *Hdrs : HeaderList) - IsPrivate |= - std::find_if(Hdrs->begin(), Hdrs->end(), [&](const Module::Header &H) { - return H.Entry == IncFileEnt; - }) != Hdrs->end(); - assert(IsPrivate == IsPrivateRole && "inconsistent headers and roles"); + if (IsPrivateRole) { + // Check for consistency between the module header role + // as obtained from the lookup and as obtained from the module. + // This check is not cheap, so enable it only for debugging. + bool IsPrivate = false; + SmallVectorImpl<Module::Header> *HeaderList[] = { + &RequestedModule->Headers[Module::HK_Private], + &RequestedModule->Headers[Module::HK_PrivateTextual]}; + for (auto *Hs : HeaderList) + IsPrivate |= + std::find_if(Hs->begin(), Hs->end(), [&](const Module::Header &H) { + return H.Entry == IncFileEnt; + }) != Hs->end(); + assert((!IsPrivateRole || IsPrivate) && "inconsistent headers and roles"); + } #endif return IsPrivateRole && + // FIXME: Should we map RequestingModule to its top-level module here + // too? This check is redundant with the isSubModuleOf check in + // diagnoseHeaderInclusion. RequestedModule->getTopLevelModule() != RequestingModule; } @@ -259,7 +258,8 @@ void ModuleMap::diagnoseHeaderInclusion(Module *RequestingModule, if (Known != Headers.end()) { for (const KnownHeader &Header : Known->second) { // If 'File' is part of 'RequestingModule' we can definitely include it. - if (Header.getModule() == RequestingModule) + if (Header.getModule() && + Header.getModule()->isSubModuleOf(RequestingModule)) return; // Remember private headers for later printing of a diagnostic. @@ -272,7 +272,7 @@ void ModuleMap::diagnoseHeaderInclusion(Module *RequestingModule, // If uses need to be specified explicitly, we are only allowed to return // modules that are explicitly used by the requesting module. if (RequestingModule && LangOpts.ModulesDeclUse && - !directlyUses(RequestingModule, Header.getModule())) { + !RequestingModule->directlyUses(Header.getModule())) { NotUsed = Header.getModule(); continue; } @@ -286,14 +286,14 @@ void ModuleMap::diagnoseHeaderInclusion(Module *RequestingModule, // We have found a header, but it is private. if (Private) { - Diags.Report(FilenameLoc, diag::error_use_of_private_header_outside_module) + Diags.Report(FilenameLoc, diag::warn_use_of_private_header_outside_module) << Filename; return; } // We have found a module, but we don't use it. if (NotUsed) { - Diags.Report(FilenameLoc, diag::error_undeclared_use_of_module) + Diags.Report(FilenameLoc, diag::err_undeclared_use_of_module) << RequestingModule->getFullModuleName() << Filename; return; } @@ -304,7 +304,7 @@ void ModuleMap::diagnoseHeaderInclusion(Module *RequestingModule, // At this point, only non-modular includes remain. if (LangOpts.ModulesStrictDeclUse) { - Diags.Report(FilenameLoc, diag::error_undeclared_use_of_module) + Diags.Report(FilenameLoc, diag::err_undeclared_use_of_module) << RequestingModule->getFullModuleName() << Filename; } else if (RequestingModule) { diag::kind DiagID = RequestingModule->getTopLevelModule()->IsFramework ? @@ -314,6 +314,22 @@ void ModuleMap::diagnoseHeaderInclusion(Module *RequestingModule, } } +static bool isBetterKnownHeader(const ModuleMap::KnownHeader &New, + const ModuleMap::KnownHeader &Old) { + // Prefer a public header over a private header. 
+ if ((New.getRole() & ModuleMap::PrivateHeader) != + (Old.getRole() & ModuleMap::PrivateHeader)) + return !(New.getRole() & ModuleMap::PrivateHeader); + + // Prefer a non-textual header over a textual header. + if ((New.getRole() & ModuleMap::TextualHeader) != + (Old.getRole() & ModuleMap::TextualHeader)) + return !(New.getRole() & ModuleMap::TextualHeader); + + // Don't have a reason to choose between these. Just keep the first one. + return false; +} + ModuleMap::KnownHeader ModuleMap::findModuleForHeader(const FileEntry *File, Module *RequestingModule, @@ -345,11 +361,10 @@ ModuleMap::findModuleForHeader(const FileEntry *File, // If uses need to be specified explicitly, we are only allowed to return // modules that are explicitly used by the requesting module. if (RequestingModule && LangOpts.ModulesDeclUse && - !directlyUses(RequestingModule, I->getModule())) + !RequestingModule->directlyUses(I->getModule())) continue; - // Prefer a public header over a private header. - if (!Result || (Result.getRole() & ModuleMap::PrivateHeader)) + if (!Result || isBetterKnownHeader(*I, Result)) Result = *I; } return MakeResult(Result); @@ -548,7 +563,7 @@ ModuleMap::findOrCreateModule(StringRef Name, Module *Parent, bool IsFramework, // Create a new module with this name. Module *Result = new Module(Name, SourceLocation(), Parent, - IsFramework, IsExplicit); + IsFramework, IsExplicit, NumCreatedModules++); if (LangOpts.CurrentModule == Name) { SourceModule = Result; SourceModuleName = Name; @@ -678,7 +693,8 @@ Module *ModuleMap::inferFrameworkModule(StringRef ModuleName, return nullptr; Module *Result = new Module(ModuleName, SourceLocation(), Parent, - /*IsFramework=*/true, /*IsExplicit=*/false); + /*IsFramework=*/true, /*IsExplicit=*/false, + NumCreatedModules++); InferredModuleAllowedBy[Result] = ModuleMapFile; Result->IsInferred = true; if (LangOpts.CurrentModule == ModuleName) { @@ -689,14 +705,16 @@ Module *ModuleMap::inferFrameworkModule(StringRef ModuleName, Result->IsSystem |= Attrs.IsSystem; Result->IsExternC |= Attrs.IsExternC; Result->ConfigMacrosExhaustive |= Attrs.IsExhaustive; + Result->Directory = FrameworkDir; if (!Parent) Modules[ModuleName] = Result; // umbrella header "umbrella-header-name" - Result->Umbrella = UmbrellaHeader; - Headers[UmbrellaHeader].push_back(KnownHeader(Result, NormalHeader)); - UmbrellaDirs[UmbrellaHeader->getDir()] = Result; + // + // The "Headers/" component of the name is implied because this is + // a framework module. 
+ setUmbrellaHeader(Result, UmbrellaHeader, ModuleName + ".h"); // export * Result->Exports.push_back(Module::ExportDecl(nullptr, true)); @@ -711,8 +729,7 @@ Module *ModuleMap::inferFrameworkModule(StringRef ModuleName, = StringRef(FrameworkDir->getName()); llvm::sys::path::append(SubframeworksDirName, "Frameworks"); llvm::sys::path::native(SubframeworksDirName); - for (llvm::sys::fs::directory_iterator - Dir(SubframeworksDirName.str(), EC), DirEnd; + for (llvm::sys::fs::directory_iterator Dir(SubframeworksDirName, EC), DirEnd; Dir != DirEnd && !EC; Dir.increment(EC)) { if (!StringRef(Dir->path()).endswith(".framework")) continue; @@ -758,14 +775,18 @@ Module *ModuleMap::inferFrameworkModule(StringRef ModuleName, return Result; } -void ModuleMap::setUmbrellaHeader(Module *Mod, const FileEntry *UmbrellaHeader){ +void ModuleMap::setUmbrellaHeader(Module *Mod, const FileEntry *UmbrellaHeader, + Twine NameAsWritten) { Headers[UmbrellaHeader].push_back(KnownHeader(Mod, NormalHeader)); Mod->Umbrella = UmbrellaHeader; + Mod->UmbrellaAsWritten = NameAsWritten.str(); UmbrellaDirs[UmbrellaHeader->getDir()] = Mod; } -void ModuleMap::setUmbrellaDir(Module *Mod, const DirectoryEntry *UmbrellaDir) { +void ModuleMap::setUmbrellaDir(Module *Mod, const DirectoryEntry *UmbrellaDir, + Twine NameAsWritten) { Mod->Umbrella = UmbrellaDir; + Mod->UmbrellaAsWritten = NameAsWritten.str(); UmbrellaDirs[UmbrellaDir] = Mod; } @@ -850,50 +871,44 @@ void ModuleMap::dump() { } bool ModuleMap::resolveExports(Module *Mod, bool Complain) { - bool HadError = false; - for (unsigned I = 0, N = Mod->UnresolvedExports.size(); I != N; ++I) { - Module::ExportDecl Export = resolveExport(Mod, Mod->UnresolvedExports[I], - Complain); + auto Unresolved = std::move(Mod->UnresolvedExports); + Mod->UnresolvedExports.clear(); + for (auto &UE : Unresolved) { + Module::ExportDecl Export = resolveExport(Mod, UE, Complain); if (Export.getPointer() || Export.getInt()) Mod->Exports.push_back(Export); else - HadError = true; + Mod->UnresolvedExports.push_back(UE); } - Mod->UnresolvedExports.clear(); - return HadError; + return !Mod->UnresolvedExports.empty(); } bool ModuleMap::resolveUses(Module *Mod, bool Complain) { - bool HadError = false; - for (unsigned I = 0, N = Mod->UnresolvedDirectUses.size(); I != N; ++I) { - Module *DirectUse = - resolveModuleId(Mod->UnresolvedDirectUses[I], Mod, Complain); + auto Unresolved = std::move(Mod->UnresolvedDirectUses); + Mod->UnresolvedDirectUses.clear(); + for (auto &UDU : Unresolved) { + Module *DirectUse = resolveModuleId(UDU, Mod, Complain); if (DirectUse) Mod->DirectUses.push_back(DirectUse); else - HadError = true; + Mod->UnresolvedDirectUses.push_back(UDU); } - Mod->UnresolvedDirectUses.clear(); - return HadError; + return !Mod->UnresolvedDirectUses.empty(); } bool ModuleMap::resolveConflicts(Module *Mod, bool Complain) { - bool HadError = false; - for (unsigned I = 0, N = Mod->UnresolvedConflicts.size(); I != N; ++I) { - Module *OtherMod = resolveModuleId(Mod->UnresolvedConflicts[I].Id, - Mod, Complain); - if (!OtherMod) { - HadError = true; - continue; - } - - Module::Conflict Conflict; - Conflict.Other = OtherMod; - Conflict.Message = Mod->UnresolvedConflicts[I].Message; - Mod->Conflicts.push_back(Conflict); - } + auto Unresolved = std::move(Mod->UnresolvedConflicts); Mod->UnresolvedConflicts.clear(); - return HadError; + for (auto &UC : Unresolved) { + if (Module *OtherMod = resolveModuleId(UC.Id, Mod, Complain)) { + Module::Conflict Conflict; + Conflict.Other = OtherMod; + Conflict.Message = 
UC.Message; + Mod->Conflicts.push_back(Conflict); + } else + Mod->UnresolvedConflicts.push_back(UC); + } + return !Mod->UnresolvedConflicts.empty(); } Module *ModuleMap::inferModuleFromLocation(FullSourceLoc Loc) { @@ -1565,7 +1580,7 @@ void ModuleMapParser::parseExternModuleDecl() { if (llvm::sys::path::is_relative(FileNameRef)) { ModuleMapFileName += Directory->getName(); llvm::sys::path::append(ModuleMapFileName, FileName); - FileNameRef = ModuleMapFileName.str(); + FileNameRef = ModuleMapFileName; } if (const FileEntry *File = SourceMgr.getFileManager().getFile(FileNameRef)) Map.parseModuleMapFile( @@ -1712,7 +1727,7 @@ void ModuleMapParser::parseHeaderDecl(MMToken::TokenKind LeadingToken, // Check whether this file is in the public headers. llvm::sys::path::append(RelativePathName, "Headers", Header.FileName); - llvm::sys::path::append(FullPathName, RelativePathName.str()); + llvm::sys::path::append(FullPathName, RelativePathName); File = SourceMgr.getFileManager().getFile(FullPathName); if (!File) { @@ -1722,13 +1737,13 @@ void ModuleMapParser::parseHeaderDecl(MMToken::TokenKind LeadingToken, FullPathName.resize(FullPathLength); llvm::sys::path::append(RelativePathName, "PrivateHeaders", Header.FileName); - llvm::sys::path::append(FullPathName, RelativePathName.str()); + llvm::sys::path::append(FullPathName, RelativePathName); File = SourceMgr.getFileManager().getFile(FullPathName); } } else { // Lookup for normal headers. llvm::sys::path::append(RelativePathName, Header.FileName); - llvm::sys::path::append(FullPathName, RelativePathName.str()); + llvm::sys::path::append(FullPathName, RelativePathName); File = SourceMgr.getFileManager().getFile(FullPathName); // If this is a system module with a top-level header, this header @@ -1744,7 +1759,13 @@ void ModuleMapParser::parseHeaderDecl(MMToken::TokenKind LeadingToken, // If Clang supplies this header but the underlying system does not, // just silently swap in our builtin version. Otherwise, we'll end // up adding both (later). - if (!File && BuiltinFile) { + // + // For local visibility, entirely replace the system file with our + // one and textually include the system one. We need to pass macros + // from our header to the system one if we #include_next it. + // + // FIXME: Can we do this in all cases? + if (BuiltinFile && (!File || Map.LangOpts.ModulesLocalVisibility)) { File = BuiltinFile; RelativePathName = BuiltinPathName; BuiltinFile = nullptr; @@ -1764,7 +1785,7 @@ void ModuleMapParser::parseHeaderDecl(MMToken::TokenKind LeadingToken, HadError = true; } else { // Record this umbrella header. - Map.setUmbrellaHeader(ActiveModule, File); + Map.setUmbrellaHeader(ActiveModule, File, RelativePathName.str()); } } else if (LeadingToken == MMToken::ExcludeKeyword) { Module::Header H = {RelativePathName.str(), File}; @@ -1846,7 +1867,7 @@ void ModuleMapParser::parseUmbrellaDirDecl(SourceLocation UmbrellaLoc) { } // Record this umbrella directory. - Map.setUmbrellaDir(ActiveModule, Dir); + Map.setUmbrellaDir(ActiveModule, Dir, DirName); } /// \brief Parse a module export declaration. @@ -1897,18 +1918,21 @@ void ModuleMapParser::parseExportDecl() { ActiveModule->UnresolvedExports.push_back(Unresolved); } -/// \brief Parse a module uses declaration. +/// \brief Parse a module use declaration. 
/// -/// uses-declaration: -/// 'uses' wildcard-module-id +/// use-declaration: +/// 'use' wildcard-module-id void ModuleMapParser::parseUseDecl() { assert(Tok.is(MMToken::UseKeyword)); - consumeToken(); + auto KWLoc = consumeToken(); // Parse the module-id. ModuleId ParsedModuleId; parseModuleId(ParsedModuleId); - ActiveModule->UnresolvedDirectUses.push_back(ParsedModuleId); + if (ActiveModule->Parent) + Diags.Report(KWLoc, diag::err_mmap_use_decl_submodule); + else + ActiveModule->UnresolvedDirectUses.push_back(ParsedModuleId); } /// \brief Parse a link declaration. diff --git a/contrib/llvm/tools/clang/lib/Lex/PPConditionalDirectiveRecord.cpp b/contrib/llvm/tools/clang/lib/Lex/PPConditionalDirectiveRecord.cpp index 99b87a0..12a7784 100644 --- a/contrib/llvm/tools/clang/lib/Lex/PPConditionalDirectiveRecord.cpp +++ b/contrib/llvm/tools/clang/lib/Lex/PPConditionalDirectiveRecord.cpp @@ -84,14 +84,14 @@ void PPConditionalDirectiveRecord::If(SourceLocation Loc, void PPConditionalDirectiveRecord::Ifdef(SourceLocation Loc, const Token &MacroNameTok, - const MacroDirective *MD) { + const MacroDefinition &MD) { addCondDirectiveLoc(CondDirectiveLoc(Loc, CondDirectiveStack.back())); CondDirectiveStack.push_back(Loc); } void PPConditionalDirectiveRecord::Ifndef(SourceLocation Loc, const Token &MacroNameTok, - const MacroDirective *MD) { + const MacroDefinition &MD) { addCondDirectiveLoc(CondDirectiveLoc(Loc, CondDirectiveStack.back())); CondDirectiveStack.push_back(Loc); } diff --git a/contrib/llvm/tools/clang/lib/Lex/PPDirectives.cpp b/contrib/llvm/tools/clang/lib/Lex/PPDirectives.cpp index bf0ce72..ec06e79 100644 --- a/contrib/llvm/tools/clang/lib/Lex/PPDirectives.cpp +++ b/contrib/llvm/tools/clang/lib/Lex/PPDirectives.cpp @@ -62,26 +62,14 @@ MacroInfo *Preprocessor::AllocateDeserializedMacroInfo(SourceLocation L, return MI; } -DefMacroDirective * -Preprocessor::AllocateDefMacroDirective(MacroInfo *MI, SourceLocation Loc, - unsigned ImportedFromModuleID, - ArrayRef<unsigned> Overrides) { - unsigned NumExtra = (ImportedFromModuleID ? 1 : 0) + Overrides.size(); - return new (BP.Allocate(sizeof(DefMacroDirective) + - sizeof(unsigned) * NumExtra, - llvm::alignOf<DefMacroDirective>())) - DefMacroDirective(MI, Loc, ImportedFromModuleID, Overrides); +DefMacroDirective *Preprocessor::AllocateDefMacroDirective(MacroInfo *MI, + SourceLocation Loc) { + return new (BP) DefMacroDirective(MI, Loc); } UndefMacroDirective * -Preprocessor::AllocateUndefMacroDirective(SourceLocation UndefLoc, - unsigned ImportedFromModuleID, - ArrayRef<unsigned> Overrides) { - unsigned NumExtra = (ImportedFromModuleID ? 1 : 0) + Overrides.size(); - return new (BP.Allocate(sizeof(UndefMacroDirective) + - sizeof(unsigned) * NumExtra, - llvm::alignOf<UndefMacroDirective>())) - UndefMacroDirective(UndefLoc, ImportedFromModuleID, Overrides); +Preprocessor::AllocateUndefMacroDirective(SourceLocation UndefLoc) { + return new (BP) UndefMacroDirective(UndefLoc); } VisibilityMacroDirective * @@ -182,11 +170,13 @@ bool Preprocessor::CheckMacroName(Token &MacroNameTok, MacroUse isDefineUndef, return Diag(MacroNameTok, diag::err_defined_macro_name); } - if (isDefineUndef == MU_Undef && II->hasMacroDefinition() && - getMacroInfo(II)->isBuiltinMacro()) { - // Warn if undefining "__LINE__" and other builtins, per C99 6.10.8/4 - // and C++ [cpp.predefined]p4], but allow it as an extension. 
- Diag(MacroNameTok, diag::ext_pp_undef_builtin_macro); + if (isDefineUndef == MU_Undef) { + auto *MI = getMacroInfo(II); + if (MI && MI->isBuiltinMacro()) { + // Warn if undefining "__LINE__" and other builtins, per C99 6.10.8/4 + // and C++ [cpp.predefined]p4], but allow it as an extension. + Diag(MacroNameTok, diag::ext_pp_undef_builtin_macro); + } } // If defining/undefining reserved identifier or a keyword, we need to issue @@ -585,16 +575,16 @@ void Preprocessor::PTHSkipExcludedConditionalBlock() { } } -Module *Preprocessor::getModuleForLocation(SourceLocation FilenameLoc) { +Module *Preprocessor::getModuleForLocation(SourceLocation Loc) { ModuleMap &ModMap = HeaderInfo.getModuleMap(); - if (SourceMgr.isInMainFile(FilenameLoc)) { + if (SourceMgr.isInMainFile(Loc)) { if (Module *CurMod = getCurrentModule()) return CurMod; // Compiling a module. return HeaderInfo.getModuleMap().SourceModule; // Compiling a source. } // Try to determine the module of the include directive. // FIXME: Look into directly passing the FileEntry from LookupFile instead. - FileID IDOfIncl = SourceMgr.getFileID(SourceMgr.getExpansionLoc(FilenameLoc)); + FileID IDOfIncl = SourceMgr.getFileID(SourceMgr.getExpansionLoc(Loc)); if (const FileEntry *EntryOfIncl = SourceMgr.getFileEntryForID(IDOfIncl)) { // The include comes from a file. return ModMap.findModuleForHeader(EntryOfIncl).getModule(); @@ -605,6 +595,11 @@ Module *Preprocessor::getModuleForLocation(SourceLocation FilenameLoc) { } } +Module *Preprocessor::getModuleContainingLocation(SourceLocation Loc) { + return HeaderInfo.getModuleMap().inferModuleFromLocation( + FullSourceLoc(Loc, SourceMgr)); +} + const FileEntry *Preprocessor::LookupFile( SourceLocation FilenameLoc, StringRef Filename, @@ -1233,7 +1228,7 @@ void Preprocessor::HandleUserDiagnosticDirective(Token &Tok, // Find the first non-whitespace character, so that we can make the // diagnostic more succinct. - StringRef Msg = Message.str().ltrim(" "); + StringRef Msg = StringRef(Message).ltrim(" "); if (isWarning) Diag(Tok, diag::pp_hash_warning) << Msg; @@ -1290,7 +1285,7 @@ void Preprocessor::HandleMacroPublicDirective(Token &Tok) { IdentifierInfo *II = MacroNameTok.getIdentifierInfo(); // Okay, we finally have a valid identifier to undef. - MacroDirective *MD = getMacroDirective(II); + MacroDirective *MD = getLocalMacroDirective(II); // If the macro is not defined, this is an error. if (!MD) { @@ -1317,7 +1312,7 @@ void Preprocessor::HandleMacroPrivateDirective(Token &Tok) { IdentifierInfo *II = MacroNameTok.getIdentifierInfo(); // Okay, we finally have a valid identifier to undef. - MacroDirective *MD = getMacroDirective(II); + MacroDirective *MD = getLocalMacroDirective(II); // If the macro is not defined, this is an error. if (!MD) { @@ -1444,6 +1439,8 @@ bool Preprocessor::ConcatenateIncludeName(SmallString<128> &FilenameBuffer, static void EnterAnnotationToken(Preprocessor &PP, SourceLocation Begin, SourceLocation End, tok::TokenKind Kind, void *AnnotationVal) { + // FIXME: Produce this as the current token directly, rather than + // allocating a new token for it. Token *Tok = new Token[1]; Tok[0].startToken(); Tok[0].setKind(Kind); @@ -1453,6 +1450,51 @@ static void EnterAnnotationToken(Preprocessor &PP, PP.EnterTokenStream(Tok, 1, true, true); } +/// \brief Produce a diagnostic informing the user that a #include or similar +/// was implicitly treated as a module import. 
+static void diagnoseAutoModuleImport( + Preprocessor &PP, SourceLocation HashLoc, Token &IncludeTok, + ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> Path, + SourceLocation PathEnd) { + assert(PP.getLangOpts().ObjC2 && "no import syntax available"); + + SmallString<128> PathString; + for (unsigned I = 0, N = Path.size(); I != N; ++I) { + if (I) + PathString += '.'; + PathString += Path[I].first->getName(); + } + int IncludeKind = 0; + + switch (IncludeTok.getIdentifierInfo()->getPPKeywordID()) { + case tok::pp_include: + IncludeKind = 0; + break; + + case tok::pp_import: + IncludeKind = 1; + break; + + case tok::pp_include_next: + IncludeKind = 2; + break; + + case tok::pp___include_macros: + IncludeKind = 3; + break; + + default: + llvm_unreachable("unknown include directive kind"); + } + + CharSourceRange ReplaceRange(SourceRange(HashLoc, PathEnd), + /*IsTokenRange=*/false); + PP.Diag(HashLoc, diag::warn_auto_module_import) + << IncludeKind << PathString + << FixItHint::CreateReplacement(ReplaceRange, + ("@import " + PathString + ";").str()); +} + /// HandleIncludeDirective - The "\#include" tokens have just been read, read /// the file to be included from the lexer, then include it! This is a common /// routine with functionality shared between \#include, \#include_next and @@ -1491,7 +1533,7 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc, FilenameBuffer.push_back('<'); if (ConcatenateIncludeName(FilenameBuffer, End)) return; // Found <eod> but no ">"? Diagnostic already emitted. - Filename = FilenameBuffer.str(); + Filename = FilenameBuffer; CharEnd = End.getLocWithOffset(1); break; default: @@ -1563,8 +1605,8 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc, Callbacks ? &SearchPath : nullptr, Callbacks ? &RelativePath : nullptr, HeaderInfo.getHeaderSearchOpts().ModuleMaps ? &SuggestedModule : nullptr); - if (Callbacks) { - if (!File) { + if (!File) { + if (Callbacks) { // Give the clients a chance to recover. SmallString<128> RecoveryPath; if (Callbacks->FileNotFound(Filename, RecoveryPath)) { @@ -1584,18 +1626,7 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc, } } } - - if (!SuggestedModule || !getLangOpts().Modules) { - // Notify the callback object that we've seen an inclusion directive. - Callbacks->InclusionDirective(HashLoc, IncludeTok, - LangOpts.MSVCCompat ? NormalizedPath.c_str() - : Filename, - isAngled, FilenameRange, File, SearchPath, - RelativePath, /*ImportedModule=*/nullptr); - } - } - if (!File) { if (!SuppressIncludeNotFoundError) { // If the file could not be located and it was included via angle // brackets, we can attempt a lookup as though it were a quoted path to @@ -1616,19 +1647,27 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc, FixItHint::CreateReplacement(Range, "\"" + Filename.str() + "\""); } } + // If the file is still not found, just go with the vanilla diagnostic if (!File) Diag(FilenameTok, diag::err_pp_file_not_found) << Filename; } - if (!File) - return; } - // If we are supposed to import a module rather than including the header, - // do so now. - if (SuggestedModule && getLangOpts().Modules && + // Should we enter the source file? Set to false if either the source file is + // known to have no effect beyond its effect on module visibility -- that is, + // if it's got an include guard that is already defined or is a modular header + // we've imported or already built. 
+ bool ShouldEnter = true; + + // Determine whether we should try to import the module for this #include, if + // there is one. Don't do so if precompiled module support is disabled or we + // are processing this module textually (because we're building the module). + if (File && SuggestedModule && getLangOpts().Modules && SuggestedModule.getModule()->getTopLevelModuleName() != - getLangOpts().ImplementationOfModule) { + getLangOpts().CurrentModule && + SuggestedModule.getModule()->getTopLevelModuleName() != + getLangOpts().ImplementationOfModule) { // Compute the module access path corresponding to this module. // FIXME: Should we have a second loadModule() overload to avoid this // extra lookup step? @@ -1639,111 +1678,57 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc, std::reverse(Path.begin(), Path.end()); // Warn that we're replacing the include/import with a module import. - SmallString<128> PathString; - for (unsigned I = 0, N = Path.size(); I != N; ++I) { - if (I) - PathString += '.'; - PathString += Path[I].first->getName(); - } - int IncludeKind = 0; - - switch (IncludeTok.getIdentifierInfo()->getPPKeywordID()) { - case tok::pp_include: - IncludeKind = 0; - break; - - case tok::pp_import: - IncludeKind = 1; - break; - - case tok::pp_include_next: - IncludeKind = 2; - break; - - case tok::pp___include_macros: - IncludeKind = 3; - break; - - default: - llvm_unreachable("unknown include directive kind"); - } - - // Determine whether we are actually building the module that this - // include directive maps to. - bool BuildingImportedModule - = Path[0].first->getName() == getLangOpts().CurrentModule; - - if (!BuildingImportedModule && getLangOpts().ObjC2) { - // If we're not building the imported module, warn that we're going - // to automatically turn this inclusion directive into a module import. - // We only do this in Objective-C, where we have a module-import syntax. - CharSourceRange ReplaceRange(SourceRange(HashLoc, CharEnd), - /*IsTokenRange=*/false); - Diag(HashLoc, diag::warn_auto_module_import) - << IncludeKind << PathString - << FixItHint::CreateReplacement(ReplaceRange, - "@import " + PathString.str().str() + ";"); - } + // We only do this in Objective-C, where we have a module-import syntax. + if (getLangOpts().ObjC2) + diagnoseAutoModuleImport(*this, HashLoc, IncludeTok, Path, CharEnd); - // Load the module. Only make macros visible. We'll make the declarations + // Load the module to import its macros. We'll make the declarations // visible when the parser gets here. - Module::NameVisibilityKind Visibility = Module::MacrosVisible; - ModuleLoadResult Imported - = TheModuleLoader.loadModule(IncludeTok.getLocation(), Path, Visibility, - /*IsIncludeDirective=*/true); + // FIXME: Pass SuggestedModule in here rather than converting it to a path + // and making the module loader convert it back again. + ModuleLoadResult Imported = TheModuleLoader.loadModule( + IncludeTok.getLocation(), Path, Module::Hidden, + /*IsIncludeDirective=*/true); assert((Imported == nullptr || Imported == SuggestedModule.getModule()) && "the imported module is different than the suggested one"); - if (!Imported && hadModuleLoaderFatalFailure()) { - // With a fatal failure in the module loader, we abort parsing. 
- Token &Result = IncludeTok; - if (CurLexer) { - Result.startToken(); - CurLexer->FormTokenWithChars(Result, CurLexer->BufferEnd, tok::eof); - CurLexer->cutOffLexing(); - } else { - assert(CurPTHLexer && "#include but no current lexer set!"); - CurPTHLexer->getEOF(Result); - } - return; - } - - // If this header isn't part of the module we're building, we're done. - if (!BuildingImportedModule && Imported) { - if (Callbacks) { - Callbacks->InclusionDirective(HashLoc, IncludeTok, Filename, isAngled, - FilenameRange, File, - SearchPath, RelativePath, Imported); - } - - if (IncludeKind != 3) { - // Let the parser know that we hit a module import, and it should - // make the module visible. - // FIXME: Produce this as the current token directly, rather than - // allocating a new token for it. - EnterAnnotationToken(*this, HashLoc, End, tok::annot_module_include, - Imported); + if (Imported) + ShouldEnter = false; + else if (Imported.isMissingExpected()) { + // We failed to find a submodule that we assumed would exist (because it + // was in the directory of an umbrella header, for instance), but no + // actual module exists for it (because the umbrella header is + // incomplete). Treat this as a textual inclusion. + SuggestedModule = ModuleMap::KnownHeader(); + } else { + // We hit an error processing the import. Bail out. + if (hadModuleLoaderFatalFailure()) { + // With a fatal failure in the module loader, we abort parsing. + Token &Result = IncludeTok; + if (CurLexer) { + Result.startToken(); + CurLexer->FormTokenWithChars(Result, CurLexer->BufferEnd, tok::eof); + CurLexer->cutOffLexing(); + } else { + assert(CurPTHLexer && "#include but no current lexer set!"); + CurPTHLexer->getEOF(Result); + } } return; } - - // If we failed to find a submodule that we expected to find, we can - // continue. Otherwise, there's an error in the included file, so we - // don't want to include it. - if (!BuildingImportedModule && !Imported.isMissingExpected()) { - return; - } } - if (Callbacks && SuggestedModule) { - // We didn't notify the callback object that we've seen an inclusion - // directive before. Now that we are parsing the include normally and not - // turning it to a module import, notify the callback object. - Callbacks->InclusionDirective(HashLoc, IncludeTok, Filename, isAngled, - FilenameRange, File, - SearchPath, RelativePath, - /*ImportedModule=*/nullptr); + if (Callbacks) { + // Notify the callback object that we've seen an inclusion directive. + Callbacks->InclusionDirective( + HashLoc, IncludeTok, + LangOpts.MSVCCompat ? NormalizedPath.c_str() : Filename, isAngled, + FilenameRange, File, SearchPath, RelativePath, + ShouldEnter ? nullptr : SuggestedModule.getModule()); } + + if (!File) + return; // The #included file will be considered to be a system header if either it is // in a system include directory, or if the #includer is a system include @@ -1752,11 +1737,28 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc, std::max(HeaderInfo.getFileDirFlavor(File), SourceMgr.getFileCharacteristic(FilenameTok.getLocation())); + // FIXME: If we have a suggested module, and we've already visited this file, + // don't bother entering it again. We know it has no further effect. + // Ask HeaderInfo if we should enter this #include file. If not, #including // this file will have no effect. 
- if (!HeaderInfo.ShouldEnterIncludeFile(File, isImport)) { + if (ShouldEnter && + !HeaderInfo.ShouldEnterIncludeFile(*this, File, isImport)) { + ShouldEnter = false; if (Callbacks) Callbacks->FileSkipped(*File, FilenameTok, FileCharacter); + } + + // If we don't need to enter the file, stop now. + if (!ShouldEnter) { + // If this is a module import, make it visible if needed. + if (auto *M = SuggestedModule.getModule()) { + makeModuleVisible(M, HashLoc); + + if (IncludeTok.getIdentifierInfo()->getPPKeywordID() != + tok::pp___include_macros) + EnterAnnotationToken(*this, HashLoc, End, tok::annot_module_include, M); + } return; } @@ -1769,26 +1771,24 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc, FileID FID = SourceMgr.createFileID(File, IncludePos, FileCharacter); assert(!FID.isInvalid() && "Expected valid file ID"); - // Determine if we're switching to building a new submodule, and which one. - ModuleMap::KnownHeader BuildingModule; - if (getLangOpts().Modules && !getLangOpts().CurrentModule.empty()) { - Module *RequestingModule = getModuleForLocation(FilenameLoc); - BuildingModule = - HeaderInfo.getModuleMap().findModuleForHeader(File, RequestingModule); - } - // If all is good, enter the new file! if (EnterSourceFile(FID, CurDir, FilenameTok.getLocation())) return; - // If we're walking into another part of the same module, let the parser - // know that any future declarations are within that other submodule. - if (BuildingModule) { + // Determine if we're switching to building a new submodule, and which one. + if (auto *M = SuggestedModule.getModule()) { assert(!CurSubmodule && "should not have marked this as a module yet"); - CurSubmodule = BuildingModule.getModule(); + CurSubmodule = M; - EnterAnnotationToken(*this, HashLoc, End, tok::annot_module_begin, - CurSubmodule); + // Let the macro handling code know that any future macros are within + // the new submodule. + EnterSubmodule(M, HashLoc); + + // Let the parser know that any future declarations are within the new + // submodule. + // FIXME: There's no point doing this if we're handling a #__include_macros + // directive. + EnterAnnotationToken(*this, HashLoc, End, tok::annot_module_begin, M); } } @@ -2290,9 +2290,9 @@ void Preprocessor::HandleUndefDirective(Token &UndefTok) { // Check to see if this is the last token on the #undef line. CheckEndOfDirective("undef"); - // Okay, we finally have a valid identifier to undef. - MacroDirective *MD = getMacroDirective(MacroNameTok.getIdentifierInfo()); - const MacroInfo *MI = MD ? MD->getMacroInfo() : nullptr; + // Okay, we have a valid identifier to undef. + auto *II = MacroNameTok.getIdentifierInfo(); + auto MD = getMacroDefinition(II); // If the callbacks want to know, tell them about the macro #undef. // Note: no matter if the macro was defined or not. @@ -2300,6 +2300,7 @@ void Preprocessor::HandleUndefDirective(Token &UndefTok) { Callbacks->MacroUndefined(MacroNameTok, MD); // If the macro is not defined, this is a noop undef, just return. + const MacroInfo *MI = MD.getMacroInfo(); if (!MI) return; @@ -2344,8 +2345,8 @@ void Preprocessor::HandleIfdefDirective(Token &Result, bool isIfndef, CheckEndOfDirective(isIfndef ? "ifndef" : "ifdef"); IdentifierInfo *MII = MacroNameTok.getIdentifierInfo(); - MacroDirective *MD = getMacroDirective(MII); - MacroInfo *MI = MD ? 
MD->getMacroInfo() : nullptr; + auto MD = getMacroDefinition(MII); + MacroInfo *MI = MD.getMacroInfo(); if (CurPPLexer->getConditionalStackDepth() == 0) { // If the start of a top-level #ifdef and if the macro is not defined, diff --git a/contrib/llvm/tools/clang/lib/Lex/PPExpressions.cpp b/contrib/llvm/tools/clang/lib/Lex/PPExpressions.cpp index 9cf72cf..4451302 100644 --- a/contrib/llvm/tools/clang/lib/Lex/PPExpressions.cpp +++ b/contrib/llvm/tools/clang/lib/Lex/PPExpressions.cpp @@ -108,15 +108,13 @@ static bool EvaluateDefined(PPValue &Result, Token &PeekTok, DefinedTracker &DT, // Otherwise, we got an identifier, is it defined to something? IdentifierInfo *II = PeekTok.getIdentifierInfo(); - Result.Val = II->hasMacroDefinition(); - Result.Val.setIsUnsigned(false); // Result is signed intmax_t. + MacroDefinition Macro = PP.getMacroDefinition(II); + Result.Val = !!Macro; + Result.Val.setIsUnsigned(false); // Result is signed intmax_t. - MacroDirective *Macro = nullptr; // If there is a macro, mark it used. - if (Result.Val != 0 && ValueLive) { - Macro = PP.getMacroDirective(II); - PP.markMacroAsUsed(Macro->getMacroInfo()); - } + if (Result.Val != 0 && ValueLive) + PP.markMacroAsUsed(Macro.getMacroInfo()); // Save macro token for callback. Token macroToken(PeekTok); @@ -144,11 +142,7 @@ static bool EvaluateDefined(PPValue &Result, Token &PeekTok, DefinedTracker &DT, // Invoke the 'defined' callback. if (PPCallbacks *Callbacks = PP.getPPCallbacks()) { - MacroDirective *MD = Macro; - // Pass the MacroInfo for the macro name even if the value is dead. - if (!MD && Result.Val != 0) - MD = PP.getMacroDirective(II); - Callbacks->Defined(macroToken, MD, + Callbacks->Defined(macroToken, Macro, SourceRange(beginLoc, PeekTok.getLocation())); } @@ -310,7 +304,9 @@ static bool EvaluateValue(PPValue &Result, Token &PeekTok, DefinedTracker &DT, // Set the value. Val = Literal.getValue(); // Set the signedness. UTF-16 and UTF-32 are always unsigned - if (!Literal.isUTF16() && !Literal.isUTF32()) + if (Literal.isWide()) + Val.setIsUnsigned(!TargetInfo::isTypeSigned(TI.getWCharType())); + else if (!Literal.isUTF16() && !Literal.isUTF32()) Val.setIsUnsigned(!PP.getLangOpts().CharIsSigned); if (Result.Val.getBitWidth() > Val.getBitWidth()) { @@ -732,8 +728,7 @@ static bool EvaluateDirectiveSubExpr(PPValue &LHS, unsigned MinPrec, /// EvaluateDirectiveExpression - Evaluate an integer constant expression that /// may occur after a #if or #elif directive. If the expression is equivalent /// to "!defined(X)" return X in IfNDefMacro. -bool Preprocessor:: -EvaluateDirectiveExpression(IdentifierInfo *&IfNDefMacro) { +bool Preprocessor::EvaluateDirectiveExpression(IdentifierInfo *&IfNDefMacro) { SaveAndRestore<bool> PPDir(ParsingIfOrElifDirective, true); // Save the current state of 'DisableMacroExpansion' and reset it to false. 
If // 'DisableMacroExpansion' is true, then we must be in a macro argument list diff --git a/contrib/llvm/tools/clang/lib/Lex/PPLexerChange.cpp b/contrib/llvm/tools/clang/lib/Lex/PPLexerChange.cpp index fb5e2b0..e68fb7d 100644 --- a/contrib/llvm/tools/clang/lib/Lex/PPLexerChange.cpp +++ b/contrib/llvm/tools/clang/lib/Lex/PPLexerChange.cpp @@ -309,7 +309,7 @@ bool Preprocessor::HandleEndOfFile(Token &Result, bool isEndOfMacro) { } if (const IdentifierInfo *DefinedMacro = CurPPLexer->MIOpt.GetDefinedMacro()) { - if (!ControllingMacro->hasMacroDefinition() && + if (!isMacroDefined(ControllingMacro) && DefinedMacro != ControllingMacro && HeaderInfo.FirstTimeLexingFile(FE)) { @@ -400,6 +400,9 @@ bool Preprocessor::HandleEndOfFile(Token &Result, bool isEndOfMacro) { CurLexer->FormTokenWithChars(Result, EndPos, tok::annot_module_end); Result.setAnnotationEndLoc(Result.getLocation()); Result.setAnnotationValue(CurSubmodule); + + // We're done with this submodule. + LeaveSubmodule(); } // We're done with the #included file. @@ -471,7 +474,7 @@ bool Preprocessor::HandleEndOfFile(Token &Result, bool isEndOfMacro) { if (!getDiagnostics().isIgnored(diag::warn_uncovered_module_header, StartLoc)) { ModuleMap &ModMap = getHeaderSearchInfo().getModuleMap(); - const DirectoryEntry *Dir = Mod->getUmbrellaDir(); + const DirectoryEntry *Dir = Mod->getUmbrellaDir().Entry; vfs::FileSystem &FS = *FileMgr.getVirtualFileSystem(); std::error_code EC; for (vfs::recursive_directory_iterator Entry(FS, Dir->getName(), EC), End; @@ -605,3 +608,126 @@ void Preprocessor::HandleMicrosoftCommentPaste(Token &Tok) { // preprocessor directive mode), so just return EOF as our token. assert(!FoundLexer && "Lexer should return EOD before EOF in PP mode"); } + +void Preprocessor::EnterSubmodule(Module *M, SourceLocation ImportLoc) { + if (!getLangOpts().ModulesLocalVisibility) { + // Just track that we entered this submodule. + BuildingSubmoduleStack.push_back( + BuildingSubmoduleInfo(M, ImportLoc, CurSubmoduleState)); + return; + } + + // Resolve as much of the module definition as we can now, before we enter + // one of its headers. + // FIXME: Can we enable Complain here? + // FIXME: Can we do this when local visibility is disabled? + ModuleMap &ModMap = getHeaderSearchInfo().getModuleMap(); + ModMap.resolveExports(M, /*Complain=*/false); + ModMap.resolveUses(M, /*Complain=*/false); + ModMap.resolveConflicts(M, /*Complain=*/false); + + // If this is the first time we've entered this module, set up its state. + auto R = Submodules.insert(std::make_pair(M, SubmoduleState())); + auto &State = R.first->second; + bool FirstTime = R.second; + if (FirstTime) { + // Determine the set of starting macros for this submodule; take these + // from the "null" module (the predefines buffer). + auto &StartingMacros = NullSubmoduleState.Macros; + + // Restore to the starting state. + // FIXME: Do this lazily, when each macro name is first referenced. + for (auto &Macro : StartingMacros) { + MacroState MS(Macro.second.getLatest()); + MS.setOverriddenMacros(*this, Macro.second.getOverriddenMacros()); + State.Macros.insert(std::make_pair(Macro.first, std::move(MS))); + } + } + + // Track that we entered this module. + BuildingSubmoduleStack.push_back( + BuildingSubmoduleInfo(M, ImportLoc, CurSubmoduleState)); + + // Switch to this submodule as the current submodule. + CurSubmoduleState = &State; + + // This module is visible to itself. 
+ if (FirstTime) + makeModuleVisible(M, ImportLoc); +} + +void Preprocessor::LeaveSubmodule() { + auto &Info = BuildingSubmoduleStack.back(); + + Module *LeavingMod = Info.M; + SourceLocation ImportLoc = Info.ImportLoc; + + // Create ModuleMacros for any macros defined in this submodule. + for (auto &Macro : CurSubmoduleState->Macros) { + auto *II = const_cast<IdentifierInfo*>(Macro.first); + + // Find the starting point for the MacroDirective chain in this submodule. + MacroDirective *OldMD = nullptr; + if (getLangOpts().ModulesLocalVisibility) { + // FIXME: It'd be better to start at the state from when we most recently + // entered this submodule, but it doesn't really matter. + auto &PredefMacros = NullSubmoduleState.Macros; + auto PredefMacroIt = PredefMacros.find(Macro.first); + if (PredefMacroIt == PredefMacros.end()) + OldMD = nullptr; + else + OldMD = PredefMacroIt->second.getLatest(); + } + + // This module may have exported a new macro. If so, create a ModuleMacro + // representing that fact. + bool ExplicitlyPublic = false; + for (auto *MD = Macro.second.getLatest(); MD != OldMD; + MD = MD->getPrevious()) { + assert(MD && "broken macro directive chain"); + + // Stop on macros defined in other submodules we #included along the way. + // There's no point doing this if we're tracking local submodule + // visibility, since there can be no such directives in our list. + if (!getLangOpts().ModulesLocalVisibility) { + Module *Mod = getModuleContainingLocation(MD->getLocation()); + if (Mod != LeavingMod) + break; + } + + if (auto *VisMD = dyn_cast<VisibilityMacroDirective>(MD)) { + // The latest visibility directive for a name in a submodule affects + // all the directives that come before it. + if (VisMD->isPublic()) + ExplicitlyPublic = true; + else if (!ExplicitlyPublic) + // Private with no following public directive: not exported. + break; + } else { + MacroInfo *Def = nullptr; + if (DefMacroDirective *DefMD = dyn_cast<DefMacroDirective>(MD)) + Def = DefMD->getInfo(); + + // FIXME: Issue a warning if multiple headers for the same submodule + // define a macro, rather than silently ignoring all but the first. + bool IsNew; + // Don't bother creating a module macro if it would represent a #undef + // that doesn't override anything. + if (Def || !Macro.second.getOverriddenMacros().empty()) + addModuleMacro(LeavingMod, II, Def, + Macro.second.getOverriddenMacros(), IsNew); + break; + } + } + } + + // Put back the outer module's state, if we're tracking it. + if (getLangOpts().ModulesLocalVisibility) + CurSubmoduleState = Info.OuterSubmoduleState; + + BuildingSubmoduleStack.pop_back(); + + // A nested #include makes the included submodule visible. 
+ if (!BuildingSubmoduleStack.empty() || !getLangOpts().ModulesLocalVisibility) + makeModuleVisible(LeavingMod, ImportLoc); +} diff --git a/contrib/llvm/tools/clang/lib/Lex/PPMacroExpansion.cpp b/contrib/llvm/tools/clang/lib/Lex/PPMacroExpansion.cpp index cd05d06..9046ad5 100644 --- a/contrib/llvm/tools/clang/lib/Lex/PPMacroExpansion.cpp +++ b/contrib/llvm/tools/clang/lib/Lex/PPMacroExpansion.cpp @@ -34,44 +34,234 @@ using namespace clang; MacroDirective * -Preprocessor::getMacroDirectiveHistory(const IdentifierInfo *II) const { - assert(II->hadMacroDefinition() && "Identifier has not been not a macro!"); - - macro_iterator Pos = Macros.find(II); - assert(Pos != Macros.end() && "Identifier macro info is missing!"); - return Pos->second; +Preprocessor::getLocalMacroDirectiveHistory(const IdentifierInfo *II) const { + if (!II->hadMacroDefinition()) + return nullptr; + auto Pos = CurSubmoduleState->Macros.find(II); + return Pos == CurSubmoduleState->Macros.end() ? nullptr + : Pos->second.getLatest(); } void Preprocessor::appendMacroDirective(IdentifierInfo *II, MacroDirective *MD){ assert(MD && "MacroDirective should be non-zero!"); assert(!MD->getPrevious() && "Already attached to a MacroDirective history."); - MacroDirective *&StoredMD = Macros[II]; - MD->setPrevious(StoredMD); - StoredMD = MD; - // Setup the identifier as having associated macro history. + MacroState &StoredMD = CurSubmoduleState->Macros[II]; + auto *OldMD = StoredMD.getLatest(); + MD->setPrevious(OldMD); + StoredMD.setLatest(MD); + StoredMD.overrideActiveModuleMacros(*this, II); + + // Set up the identifier as having associated macro history. II->setHasMacroDefinition(true); - if (!MD->isDefined()) + if (!MD->isDefined() && LeafModuleMacros.find(II) == LeafModuleMacros.end()) II->setHasMacroDefinition(false); - bool isImportedMacro = isa<DefMacroDirective>(MD) && - cast<DefMacroDirective>(MD)->isImported(); - if (II->isFromAST() && !isImportedMacro) + if (II->isFromAST()) II->setChangedSinceDeserialization(); } void Preprocessor::setLoadedMacroDirective(IdentifierInfo *II, MacroDirective *MD) { assert(II && MD); - MacroDirective *&StoredMD = Macros[II]; - assert(!StoredMD && + MacroState &StoredMD = CurSubmoduleState->Macros[II]; + assert(!StoredMD.getLatest() && "the macro history was modified before initializing it from a pch"); StoredMD = MD; // Setup the identifier as having associated macro history. II->setHasMacroDefinition(true); - if (!MD->isDefined()) + if (!MD->isDefined() && LeafModuleMacros.find(II) == LeafModuleMacros.end()) II->setHasMacroDefinition(false); } +ModuleMacro *Preprocessor::addModuleMacro(Module *Mod, IdentifierInfo *II, + MacroInfo *Macro, + ArrayRef<ModuleMacro *> Overrides, + bool &New) { + llvm::FoldingSetNodeID ID; + ModuleMacro::Profile(ID, Mod, II); + + void *InsertPos; + if (auto *MM = ModuleMacros.FindNodeOrInsertPos(ID, InsertPos)) { + New = false; + return MM; + } + + auto *MM = ModuleMacro::create(*this, Mod, II, Macro, Overrides); + ModuleMacros.InsertNode(MM, InsertPos); + + // Each overridden macro is now overridden by one more macro. + bool HidAny = false; + for (auto *O : Overrides) { + HidAny |= (O->NumOverriddenBy == 0); + ++O->NumOverriddenBy; + } + + // If we were the first overrider for any macro, it's no longer a leaf. 
+ auto &LeafMacros = LeafModuleMacros[II]; + if (HidAny) { + LeafMacros.erase(std::remove_if(LeafMacros.begin(), LeafMacros.end(), + [](ModuleMacro *MM) { + return MM->NumOverriddenBy != 0; + }), + LeafMacros.end()); + } + + // The new macro is always a leaf macro. + LeafMacros.push_back(MM); + // The identifier now has defined macros (that may or may not be visible). + II->setHasMacroDefinition(true); + + New = true; + return MM; +} + +ModuleMacro *Preprocessor::getModuleMacro(Module *Mod, IdentifierInfo *II) { + llvm::FoldingSetNodeID ID; + ModuleMacro::Profile(ID, Mod, II); + + void *InsertPos; + return ModuleMacros.FindNodeOrInsertPos(ID, InsertPos); +} + +void Preprocessor::updateModuleMacroInfo(const IdentifierInfo *II, + ModuleMacroInfo &Info) { + assert(Info.ActiveModuleMacrosGeneration != + CurSubmoduleState->VisibleModules.getGeneration() && + "don't need to update this macro name info"); + Info.ActiveModuleMacrosGeneration = + CurSubmoduleState->VisibleModules.getGeneration(); + + auto Leaf = LeafModuleMacros.find(II); + if (Leaf == LeafModuleMacros.end()) { + // No imported macros at all: nothing to do. + return; + } + + Info.ActiveModuleMacros.clear(); + + // Every macro that's locally overridden is overridden by a visible macro. + llvm::DenseMap<ModuleMacro *, int> NumHiddenOverrides; + for (auto *O : Info.OverriddenMacros) + NumHiddenOverrides[O] = -1; + + // Collect all macros that are not overridden by a visible macro. + llvm::SmallVector<ModuleMacro *, 16> Worklist(Leaf->second.begin(), + Leaf->second.end()); + while (!Worklist.empty()) { + auto *MM = Worklist.pop_back_val(); + if (CurSubmoduleState->VisibleModules.isVisible(MM->getOwningModule())) { + // We only care about collecting definitions; undefinitions only act + // to override other definitions. + if (MM->getMacroInfo()) + Info.ActiveModuleMacros.push_back(MM); + } else { + for (auto *O : MM->overrides()) + if ((unsigned)++NumHiddenOverrides[O] == O->getNumOverridingMacros()) + Worklist.push_back(O); + } + } + // Our reverse postorder walk found the macros in reverse order. + std::reverse(Info.ActiveModuleMacros.begin(), Info.ActiveModuleMacros.end()); + + // Determine whether the macro name is ambiguous. + MacroInfo *MI = nullptr; + bool IsSystemMacro = true; + bool IsAmbiguous = false; + if (auto *MD = Info.MD) { + while (MD && isa<VisibilityMacroDirective>(MD)) + MD = MD->getPrevious(); + if (auto *DMD = dyn_cast_or_null<DefMacroDirective>(MD)) { + MI = DMD->getInfo(); + IsSystemMacro &= SourceMgr.isInSystemHeader(DMD->getLocation()); + } + } + for (auto *Active : Info.ActiveModuleMacros) { + auto *NewMI = Active->getMacroInfo(); + + // Before marking the macro as ambiguous, check if this is a case where + // both macros are in system headers. If so, we trust that the system + // did not get it wrong. This also handles cases where Clang's own + // headers have a different spelling of certain system macros: + // #define LONG_MAX __LONG_MAX__ (clang's limits.h) + // #define LONG_MAX 0x7fffffffffffffffL (system's limits.h) + // + // FIXME: Remove the defined-in-system-headers check. clang's limits.h + // overrides the system limits.h's macros, so there's no conflict here. 
+ if (MI && NewMI != MI && + !MI->isIdenticalTo(*NewMI, *this, /*Syntactically=*/true)) + IsAmbiguous = true; + IsSystemMacro &= Active->getOwningModule()->IsSystem || + SourceMgr.isInSystemHeader(NewMI->getDefinitionLoc()); + MI = NewMI; + } + Info.IsAmbiguous = IsAmbiguous && !IsSystemMacro; +} + +void Preprocessor::dumpMacroInfo(const IdentifierInfo *II) { + ArrayRef<ModuleMacro*> Leaf; + auto LeafIt = LeafModuleMacros.find(II); + if (LeafIt != LeafModuleMacros.end()) + Leaf = LeafIt->second; + const MacroState *State = nullptr; + auto Pos = CurSubmoduleState->Macros.find(II); + if (Pos != CurSubmoduleState->Macros.end()) + State = &Pos->second; + + llvm::errs() << "MacroState " << State << " " << II->getNameStart(); + if (State && State->isAmbiguous(*this, II)) + llvm::errs() << " ambiguous"; + if (State && !State->getOverriddenMacros().empty()) { + llvm::errs() << " overrides"; + for (auto *O : State->getOverriddenMacros()) + llvm::errs() << " " << O->getOwningModule()->getFullModuleName(); + } + llvm::errs() << "\n"; + + // Dump local macro directives. + for (auto *MD = State ? State->getLatest() : nullptr; MD; + MD = MD->getPrevious()) { + llvm::errs() << " "; + MD->dump(); + } + + // Dump module macros. + llvm::DenseSet<ModuleMacro*> Active; + for (auto *MM : State ? State->getActiveModuleMacros(*this, II) : None) + Active.insert(MM); + llvm::DenseSet<ModuleMacro*> Visited; + llvm::SmallVector<ModuleMacro *, 16> Worklist(Leaf.begin(), Leaf.end()); + while (!Worklist.empty()) { + auto *MM = Worklist.pop_back_val(); + llvm::errs() << " ModuleMacro " << MM << " " + << MM->getOwningModule()->getFullModuleName(); + if (!MM->getMacroInfo()) + llvm::errs() << " undef"; + + if (Active.count(MM)) + llvm::errs() << " active"; + else if (!CurSubmoduleState->VisibleModules.isVisible( + MM->getOwningModule())) + llvm::errs() << " hidden"; + else if (MM->getMacroInfo()) + llvm::errs() << " overridden"; + + if (!MM->overrides().empty()) { + llvm::errs() << " overrides"; + for (auto *O : MM->overrides()) { + llvm::errs() << " " << O->getOwningModule()->getFullModuleName(); + if (Visited.insert(O).second) + Worklist.push_back(O); + } + } + llvm::errs() << "\n"; + if (auto *MI = MM->getMacroInfo()) { + llvm::errs() << " "; + MI->dump(); + llvm::errs() << "\n"; + } + } +} + /// RegisterBuiltinMacro - Register the specified identifier in the identifier /// table and mark it as a builtin macro to be expanded. static IdentifierInfo *RegisterBuiltinMacro(Preprocessor &PP, const char *Name){ @@ -97,7 +287,11 @@ void Preprocessor::RegisterBuiltinMacros() { Ident_Pragma = RegisterBuiltinMacro(*this, "_Pragma"); // C++ Standing Document Extensions. - Ident__has_cpp_attribute = RegisterBuiltinMacro(*this, "__has_cpp_attribute"); + if (LangOpts.CPlusPlus) + Ident__has_cpp_attribute = + RegisterBuiltinMacro(*this, "__has_cpp_attribute"); + else + Ident__has_cpp_attribute = nullptr; // GCC Extensions. Ident__BASE_FILE__ = RegisterBuiltinMacro(*this, "__BASE_FILE__"); @@ -156,10 +350,11 @@ static bool isTrivialSingleTokenExpansion(const MacroInfo *MI, // If the identifier is a macro, and if that macro is enabled, it may be // expanded so it's not a trivial expansion. - if (II->hasMacroDefinition() && PP.getMacroInfo(II)->isEnabled() && - // Fast expanding "#define X X" is ok, because X would be disabled. - II != MacroIdent) - return false; + if (auto *ExpansionMI = PP.getMacroInfo(II)) + if (ExpansionMI->isEnabled() && + // Fast expanding "#define X X" is ok, because X would be disabled. 
+ II != MacroIdent) + return false; // If this is an object-like macro invocation, it is safe to trivially expand // it. @@ -222,10 +417,8 @@ bool Preprocessor::isNextPPTokenLParen() { /// HandleMacroExpandedIdentifier - If an identifier token is read that is to be /// expanded as a macro, handle it and return the next token as 'Identifier'. bool Preprocessor::HandleMacroExpandedIdentifier(Token &Identifier, - MacroDirective *MD) { - MacroDirective::DefInfo Def = MD->getDefinition(); - assert(Def.isValid()); - MacroInfo *MI = Def.getMacroInfo(); + const MacroDefinition &M) { + MacroInfo *MI = M.getMacroInfo(); // If this is a macro expansion in the "#if !defined(x)" line for the file, // then the macro could expand to different things in other contexts, we need @@ -234,9 +427,9 @@ bool Preprocessor::HandleMacroExpandedIdentifier(Token &Identifier, // If this is a builtin macro, like __LINE__ or _Pragma, handle it specially. if (MI->isBuiltinMacro()) { - if (Callbacks) Callbacks->MacroExpands(Identifier, MD, - Identifier.getLocation(), - /*Args=*/nullptr); + if (Callbacks) + Callbacks->MacroExpands(Identifier, M, Identifier.getLocation(), + /*Args=*/nullptr); ExpandBuiltinMacro(Identifier); return true; } @@ -283,9 +476,9 @@ bool Preprocessor::HandleMacroExpandedIdentifier(Token &Identifier, // MacroExpands callbacks still happen in source order, queue this // callback to have it happen after the function macro callback. DelayedMacroExpandsCallbacks.push_back( - MacroExpandsInfo(Identifier, MD, ExpansionRange)); + MacroExpandsInfo(Identifier, M, ExpansionRange)); } else { - Callbacks->MacroExpands(Identifier, MD, ExpansionRange, Args); + Callbacks->MacroExpands(Identifier, M, ExpansionRange, Args); if (!DelayedMacroExpandsCallbacks.empty()) { for (unsigned i=0, e = DelayedMacroExpandsCallbacks.size(); i!=e; ++i) { MacroExpandsInfo &Info = DelayedMacroExpandsCallbacks[i]; @@ -299,20 +492,16 @@ bool Preprocessor::HandleMacroExpandedIdentifier(Token &Identifier, } // If the macro definition is ambiguous, complain. - if (Def.getDirective()->isAmbiguous()) { + if (M.isAmbiguous()) { Diag(Identifier, diag::warn_pp_ambiguous_macro) << Identifier.getIdentifierInfo(); Diag(MI->getDefinitionLoc(), diag::note_pp_ambiguous_macro_chosen) << Identifier.getIdentifierInfo(); - for (MacroDirective::DefInfo PrevDef = Def.getPreviousDefinition(); - PrevDef && !PrevDef.isUndefined(); - PrevDef = PrevDef.getPreviousDefinition()) { - Diag(PrevDef.getMacroInfo()->getDefinitionLoc(), - diag::note_pp_ambiguous_macro_other) - << Identifier.getIdentifierInfo(); - if (!PrevDef.getDirective()->isAmbiguous()) - break; - } + M.forAllDefinitions([&](const MacroInfo *OtherMI) { + if (OtherMI != MI) + Diag(OtherMI->getDefinitionLoc(), diag::note_pp_ambiguous_macro_other) + << Identifier.getIdentifierInfo(); + }); } // If we started lexing a macro, enter the macro expansion body. @@ -598,7 +787,7 @@ MacroArgs *Preprocessor::ReadFunctionLikeMacroArgs(Token &MacroName, // If this is a comment token in the argument list and we're just in // -C mode (not -CC mode), discard the comment. continue; - } else if (Tok.getIdentifierInfo() != nullptr) { + } else if (!Tok.isAnnotation() && Tok.getIdentifierInfo() != nullptr) { // Reading macro arguments can cause macros that we are currently // expanding from to be popped off the expansion stack. Doing so causes // them to be reenabled for expansion. 
Here we record whether any @@ -868,6 +1057,7 @@ static bool HasFeature(const Preprocessor &PP, const IdentifierInfo *II) { .Case("attribute_analyzer_noreturn", true) .Case("attribute_availability", true) .Case("attribute_availability_with_message", true) + .Case("attribute_availability_app_extension", true) .Case("attribute_cf_returns_not_retained", true) .Case("attribute_cf_returns_retained", true) .Case("attribute_deprecated_with_message", true) @@ -912,7 +1102,8 @@ static bool HasFeature(const Preprocessor &PP, const IdentifierInfo *II) { .Case("objc_dictionary_literals", LangOpts.ObjC2) .Case("objc_boxed_expressions", LangOpts.ObjC2) .Case("arc_cf_code_audited", true) - .Case("objc_bridge_id", LangOpts.ObjC2) + .Case("objc_bridge_id", true) + .Case("objc_bridge_id_on_typedefs", true) // C11 features .Case("c_alignas", LangOpts.C11) .Case("c_alignof", LangOpts.C11) @@ -1050,6 +1241,7 @@ static bool HasExtension(const Preprocessor &PP, const IdentifierInfo *II) { .Case("cxx_range_for", LangOpts.CPlusPlus) .Case("cxx_reference_qualified_functions", LangOpts.CPlusPlus) .Case("cxx_rvalue_references", LangOpts.CPlusPlus) + .Case("cxx_variadic_templates", LangOpts.CPlusPlus) // C++1y features supported by other languages as extensions. .Case("cxx_binary_literals", true) .Case("cxx_init_captures", LangOpts.CPlusPlus11) @@ -1071,6 +1263,9 @@ static bool EvaluateHasIncludeCommon(Token &Tok, // These expressions are only allowed within a preprocessor directive. if (!PP.isParsingIfOrElifDirective()) { PP.Diag(LParenLoc, diag::err_pp_directive_required) << II->getName(); + // Return a valid identifier token. + assert(Tok.is(tok::identifier)); + Tok.setIdentifierInfo(II); return false; } @@ -1129,7 +1324,7 @@ static bool EvaluateHasIncludeCommon(Token &Tok, Tok.setKind(tok::eod); return false; // Found <eod> but no ">"? Diagnostic already emitted. } - Filename = FilenameBuffer.str(); + Filename = FilenameBuffer; break; default: PP.Diag(Tok.getLocation(), diag::err_pp_expects_filename); @@ -1179,7 +1374,7 @@ static bool EvaluateHasIncludeNext(Token &Tok, // __has_include_next is like __has_include, except that we start // searching after the current found directory. If we can't do this, // issue a diagnostic. - // FIXME: Factor out duplication wiht + // FIXME: Factor out duplication with // Preprocessor::HandleIncludeNextDirective. const DirectoryLookup *Lookup = PP.GetCurDirLookup(); const FileEntry *LookupFromFile = nullptr; @@ -1312,7 +1507,7 @@ void Preprocessor::ExpandBuiltinMacro(Token &Tok) { if (PLoc.isValid()) { FN += PLoc.getFilename(); Lexer::Stringify(FN); - OS << '"' << FN.str() << '"'; + OS << '"' << FN << '"'; } Tok.setKind(tok::string_literal); } else if (II == Ident__DATE__) { @@ -1459,9 +1654,11 @@ void Preprocessor::ExpandBuiltinMacro(Token &Tok) { Value = EvaluateHasInclude(Tok, II, *this); else Value = EvaluateHasIncludeNext(Tok, II, *this); + + if (Tok.isNot(tok::r_paren)) + return; OS << (int)Value; - if (Tok.is(tok::r_paren)) - Tok.setKind(tok::numeric_constant); + Tok.setKind(tok::numeric_constant); } else if (II == Ident__has_warning) { // The argument should be a parenthesized string literal. // The argument to these builtins should be a parenthesized identifier. 
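The Lex changes above migrate the preprocessor callback surface from MacroDirective pointers to MacroDefinition, a value type that resolves an identifier's current definition across both local #define directives and macros imported from modules. A minimal sketch of a PPCallbacks client written against the new MacroExpands signature follows; the ExpansionCounter class and its use of llvm::StringMap are illustrative only, not part of this commit:

#include "clang/Lex/MacroInfo.h"
#include "clang/Lex/PPCallbacks.h"
#include "clang/Lex/Token.h"
#include "llvm/ADT/StringMap.h"

namespace {
// Hypothetical client: counts non-builtin macro expansions by name.
class ExpansionCounter : public clang::PPCallbacks {
  llvm::StringMap<unsigned> Counts;

public:
  void MacroExpands(const clang::Token &MacroNameTok,
                    const clang::MacroDefinition &MD,
                    clang::SourceRange Range,
                    const clang::MacroArgs *Args) override {
    // MacroDefinition::getMacroInfo() resolves to the chosen definition,
    // whether it came from a local #define or an imported module macro.
    if (const clang::MacroInfo *MI = MD.getMacroInfo())
      if (!MI->isBuiltinMacro())
        ++Counts[MacroNameTok.getIdentifierInfo()->getName()];
  }
};
} // namespace

A client like this would be registered with Preprocessor::addPPCallbacks(), which in this tree takes a std::unique_ptr<PPCallbacks>.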
diff --git a/contrib/llvm/tools/clang/lib/Lex/Pragma.cpp b/contrib/llvm/tools/clang/lib/Lex/Pragma.cpp index 8ed8328..26ed674 100644 --- a/contrib/llvm/tools/clang/lib/Lex/Pragma.cpp +++ b/contrib/llvm/tools/clang/lib/Lex/Pragma.cpp @@ -22,6 +22,7 @@ #include "clang/Lex/Preprocessor.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/StringSwitch.h" +#include "llvm/ADT/StringExtras.h" #include "llvm/Support/CrashRecoveryContext.h" #include "llvm/Support/ErrorHandling.h" #include <algorithm> @@ -400,7 +401,7 @@ void Preprocessor::HandlePragmaPoison(Token &PoisonTok) { if (II->isPoisoned()) continue; // If this is a macro identifier, emit a warning. - if (II->hasMacroDefinition()) + if (isMacroDefined(II)) Diag(Tok, diag::pp_poisoning_existing_macro); // Finally, poison it! @@ -590,8 +591,7 @@ void Preprocessor::HandlePragmaPopMacro(Token &PopMacroTok) { PragmaPushMacroInfo.find(IdentInfo); if (iter != PragmaPushMacroInfo.end()) { // Forget the MacroInfo currently associated with IdentInfo. - if (MacroDirective *CurrentMD = getMacroDirective(IdentInfo)) { - MacroInfo *MI = CurrentMD->getMacroInfo(); + if (MacroInfo *MI = getMacroInfo(IdentInfo)) { if (MI->isWarnIfUnused()) WarnUnusedMacroLocs.erase(MI->getDefinitionLoc()); appendMacroDirective(IdentInfo, AllocateUndefMacroDirective(MessageLoc)); @@ -600,11 +600,9 @@ void Preprocessor::HandlePragmaPopMacro(Token &PopMacroTok) { // Get the MacroInfo we want to reinstall. MacroInfo *MacroToReInstall = iter->second.back(); - if (MacroToReInstall) { + if (MacroToReInstall) // Reinstall the previously pushed macro. - appendDefMacroDirective(IdentInfo, MacroToReInstall, MessageLoc, - /*isImported=*/false, /*Overrides*/None); - } + appendDefMacroDirective(IdentInfo, MacroToReInstall, MessageLoc); // Pop PragmaPushMacroInfo stack. 
iter->second.pop_back(); @@ -648,7 +646,7 @@ void Preprocessor::HandlePragmaIncludeAlias(Token &Tok) { SourceLocation End; if (ConcatenateIncludeName(FileNameBuffer, End)) return; // Diagnostic already emitted - SourceFileName = FileNameBuffer.str(); + SourceFileName = FileNameBuffer; } else { Diag(Tok, diag::warn_pragma_include_alias_expected_filename); return; @@ -679,7 +677,7 @@ void Preprocessor::HandlePragmaIncludeAlias(Token &Tok) { SourceLocation End; if (ConcatenateIncludeName(FileNameBuffer, End)) return; // Diagnostic already emitted - ReplaceFileName = FileNameBuffer.str(); + ReplaceFileName = FileNameBuffer; } else { Diag(Tok, diag::warn_pragma_include_alias_expected_filename); return; @@ -870,12 +868,22 @@ struct PragmaDebugHandler : public PragmaHandler { LLVM_BUILTIN_TRAP; } else if (II->isStr("parser_crash")) { Token Crasher; + Crasher.startToken(); Crasher.setKind(tok::annot_pragma_parser_crash); + Crasher.setAnnotationRange(SourceRange(Tok.getLocation())); PP.EnterToken(Crasher); } else if (II->isStr("llvm_fatal_error")) { llvm::report_fatal_error("#pragma clang __debug llvm_fatal_error"); } else if (II->isStr("llvm_unreachable")) { llvm_unreachable("#pragma clang __debug llvm_unreachable"); + } else if (II->isStr("macro")) { + Token MacroName; + PP.LexUnexpandedToken(MacroName); + auto *MacroII = MacroName.getIdentifierInfo(); + if (MacroII) + PP.dumpMacroInfo(MacroII); + else + PP.Diag(MacroName, diag::warn_pragma_diagnostic_invalid); } else if (II->isStr("overflow_stack")) { DebugOverflowStack(); } else if (II->isStr("handle_crash")) { @@ -1029,12 +1037,8 @@ struct PragmaWarningHandler : public PragmaHandler { PP.Lex(Tok); IdentifierInfo *II = Tok.getIdentifierInfo(); - if (!II) { - PP.Diag(Tok, diag::warn_pragma_warning_spec_invalid); - return; - } - if (II->isStr("push")) { + if (II && II->isStr("push")) { // #pragma warning( push[ ,n ] ) int Level = -1; PP.Lex(Tok); @@ -1051,7 +1055,7 @@ struct PragmaWarningHandler : public PragmaHandler { } if (Callbacks) Callbacks->PragmaWarningPush(DiagLoc, Level); - } else if (II->isStr("pop")) { + } else if (II && II->isStr("pop")) { // #pragma warning( pop ) PP.Lex(Tok); if (Callbacks) @@ -1061,23 +1065,40 @@ struct PragmaWarningHandler : public PragmaHandler { // [; warning-specifier : warning-number-list...] ) while (true) { II = Tok.getIdentifierInfo(); - if (!II) { + if (!II && !Tok.is(tok::numeric_constant)) { PP.Diag(Tok, diag::warn_pragma_warning_spec_invalid); return; } // Figure out which warning specifier this is. - StringRef Specifier = II->getName(); - bool SpecifierValid = - llvm::StringSwitch<bool>(Specifier) - .Cases("1", "2", "3", "4", true) - .Cases("default", "disable", "error", "once", "suppress", true) - .Default(false); + bool SpecifierValid; + StringRef Specifier; + llvm::SmallString<1> SpecifierBuf; + if (II) { + Specifier = II->getName(); + SpecifierValid = llvm::StringSwitch<bool>(Specifier) + .Cases("default", "disable", "error", "once", + "suppress", true) + .Default(false); + // If we read a correct specifier, snatch next token (that should be + // ":", checked later). + if (SpecifierValid) + PP.Lex(Tok); + } else { + // Token is a numeric constant. It should be either 1, 2, 3 or 4. + uint64_t Value; + Specifier = PP.getSpelling(Tok, SpecifierBuf); + if (PP.parseSimpleIntegerLiteral(Tok, Value)) { + SpecifierValid = (Value >= 1) && (Value <= 4); + } else + SpecifierValid = false; + // Next token already snatched by parseSimpleIntegerLiteral. 
+ } + if (!SpecifierValid) { PP.Diag(Tok, diag::warn_pragma_warning_spec_invalid); return; } - PP.Lex(Tok); if (Tok.isNot(tok::colon)) { PP.Diag(Tok, diag::warn_pragma_warning_expected) << ":"; return; diff --git a/contrib/llvm/tools/clang/lib/Lex/PreprocessingRecord.cpp b/contrib/llvm/tools/clang/lib/Lex/PreprocessingRecord.cpp index 41bb581..a423041 100644 --- a/contrib/llvm/tools/clang/lib/Lex/PreprocessingRecord.cpp +++ b/contrib/llvm/tools/clang/lib/Lex/PreprocessingRecord.cpp @@ -45,22 +45,23 @@ PreprocessingRecord::PreprocessingRecord(SourceManager &SM) /// \brief Returns a pair of [Begin, End) iterators of preprocessed entities /// that source range \p Range encompasses. -std::pair<PreprocessingRecord::iterator, PreprocessingRecord::iterator> +llvm::iterator_range<PreprocessingRecord::iterator> PreprocessingRecord::getPreprocessedEntitiesInRange(SourceRange Range) { if (Range.isInvalid()) - return std::make_pair(iterator(), iterator()); + return llvm::make_range(iterator(), iterator()); if (CachedRangeQuery.Range == Range) { - return std::make_pair(iterator(this, CachedRangeQuery.Result.first), - iterator(this, CachedRangeQuery.Result.second)); + return llvm::make_range(iterator(this, CachedRangeQuery.Result.first), + iterator(this, CachedRangeQuery.Result.second)); } std::pair<int, int> Res = getPreprocessedEntitiesInRangeSlow(Range); CachedRangeQuery.Range = Range; CachedRangeQuery.Result = Res; - - return std::make_pair(iterator(this, Res.first), iterator(this, Res.second)); + + return llvm::make_range(iterator(this, Res.first), + iterator(this, Res.second)); } static bool isPreprocessedEntityIfInFileID(PreprocessedEntity *PPE, FileID FID, @@ -72,11 +73,8 @@ static bool isPreprocessedEntityIfInFileID(PreprocessedEntity *PPE, FileID FID, SourceLocation Loc = PPE->getSourceRange().getBegin(); if (Loc.isInvalid()) return false; - - if (SM.isInFileID(SM.getFileLoc(Loc), FID)) - return true; - else - return false; + + return SM.isInFileID(SM.getFileLoc(Loc), FID); } /// \brief Returns true if the preprocessed entity that \arg PPEI iterator @@ -90,7 +88,7 @@ bool PreprocessingRecord::isEntityInFileID(iterator PPEI, FileID FID) { if (FID.isInvalid()) return false; - int Pos = PPEI.Position; + int Pos = std::distance(iterator(this, 0), PPEI); if (Pos < 0) { if (unsigned(-Pos-1) >= LoadedPreprocessedEntities.size()) { assert(0 && "Out-of bounds loaded preprocessed entity"); @@ -248,10 +246,11 @@ PreprocessingRecord::addPreprocessedEntity(PreprocessedEntity *Entity) { assert(Entity); SourceLocation BeginLoc = Entity->getSourceRange().getBegin(); - if (isa<MacroDefinition>(Entity)) { + if (isa<MacroDefinitionRecord>(Entity)) { assert((PreprocessedEntities.empty() || - !SourceMgr.isBeforeInTranslationUnit(BeginLoc, - PreprocessedEntities.back()->getSourceRange().getBegin())) && + !SourceMgr.isBeforeInTranslationUnit( + BeginLoc, + PreprocessedEntities.back()->getSourceRange().getBegin())) && "a macro definition was encountered out-of-order"); PreprocessedEntities.push_back(Entity); return getPPEntityID(PreprocessedEntities.size()-1, /*isLoaded=*/false); @@ -320,7 +319,7 @@ unsigned PreprocessingRecord::allocateLoadedEntities(unsigned NumEntities) { } void PreprocessingRecord::RegisterMacroDefinition(MacroInfo *Macro, - MacroDefinition *Def) { + MacroDefinitionRecord *Def) { MacroDefinitions[Macro] = Def; } @@ -357,9 +356,10 @@ PreprocessingRecord::getLoadedPreprocessedEntity(unsigned Index) { return Entity; } -MacroDefinition *PreprocessingRecord::findMacroDefinition(const MacroInfo *MI) { 
- llvm::DenseMap<const MacroInfo *, MacroDefinition *>::iterator Pos - = MacroDefinitions.find(MI); +MacroDefinitionRecord * +PreprocessingRecord::findMacroDefinition(const MacroInfo *MI) { + llvm::DenseMap<const MacroInfo *, MacroDefinitionRecord *>::iterator Pos = + MacroDefinitions.find(MI); if (Pos == MacroDefinitions.end()) return nullptr; @@ -374,35 +374,34 @@ void PreprocessingRecord::addMacroExpansion(const Token &Id, return; if (MI->isBuiltinMacro()) - addPreprocessedEntity( - new (*this) MacroExpansion(Id.getIdentifierInfo(),Range)); - else if (MacroDefinition *Def = findMacroDefinition(MI)) - addPreprocessedEntity( - new (*this) MacroExpansion(Def, Range)); + addPreprocessedEntity(new (*this) + MacroExpansion(Id.getIdentifierInfo(), Range)); + else if (MacroDefinitionRecord *Def = findMacroDefinition(MI)) + addPreprocessedEntity(new (*this) MacroExpansion(Def, Range)); } void PreprocessingRecord::Ifdef(SourceLocation Loc, const Token &MacroNameTok, - const MacroDirective *MD) { + const MacroDefinition &MD) { // This is not actually a macro expansion but record it as a macro reference. if (MD) - addMacroExpansion(MacroNameTok, MD->getMacroInfo(), + addMacroExpansion(MacroNameTok, MD.getMacroInfo(), MacroNameTok.getLocation()); } void PreprocessingRecord::Ifndef(SourceLocation Loc, const Token &MacroNameTok, - const MacroDirective *MD) { + const MacroDefinition &MD) { // This is not actually a macro expansion but record it as a macro reference. if (MD) - addMacroExpansion(MacroNameTok, MD->getMacroInfo(), + addMacroExpansion(MacroNameTok, MD.getMacroInfo(), MacroNameTok.getLocation()); } void PreprocessingRecord::Defined(const Token &MacroNameTok, - const MacroDirective *MD, + const MacroDefinition &MD, SourceRange Range) { // This is not actually a macro expansion but record it as a macro reference. if (MD) - addMacroExpansion(MacroNameTok, MD->getMacroInfo(), + addMacroExpansion(MacroNameTok, MD.getMacroInfo(), MacroNameTok.getLocation()); } @@ -410,27 +409,26 @@ void PreprocessingRecord::SourceRangeSkipped(SourceRange Range) { SkippedRanges.push_back(Range); } -void PreprocessingRecord::MacroExpands(const Token &Id,const MacroDirective *MD, +void PreprocessingRecord::MacroExpands(const Token &Id, + const MacroDefinition &MD, SourceRange Range, const MacroArgs *Args) { - addMacroExpansion(Id, MD->getMacroInfo(), Range); + addMacroExpansion(Id, MD.getMacroInfo(), Range); } void PreprocessingRecord::MacroDefined(const Token &Id, const MacroDirective *MD) { const MacroInfo *MI = MD->getMacroInfo(); SourceRange R(MI->getDefinitionLoc(), MI->getDefinitionEndLoc()); - MacroDefinition *Def - = new (*this) MacroDefinition(Id.getIdentifierInfo(), R); + MacroDefinitionRecord *Def = + new (*this) MacroDefinitionRecord(Id.getIdentifierInfo(), R); addPreprocessedEntity(Def); MacroDefinitions[MI] = Def; } void PreprocessingRecord::MacroUndefined(const Token &Id, - const MacroDirective *MD) { - // Note: MI may be null (when #undef'ining an undefined macro). 
- if (MD) - MacroDefinitions.erase(MD->getMacroInfo()); + const MacroDefinition &MD) { + MD.forAllDefinitions([&](MacroInfo *MI) { MacroDefinitions.erase(MI); }); } void PreprocessingRecord::InclusionDirective( diff --git a/contrib/llvm/tools/clang/lib/Lex/Preprocessor.cpp b/contrib/llvm/tools/clang/lib/Lex/Preprocessor.cpp index b2a6d93..7e33f1c 100644 --- a/contrib/llvm/tools/clang/lib/Lex/Preprocessor.cpp +++ b/contrib/llvm/tools/clang/lib/Lex/Preprocessor.cpp @@ -73,7 +73,8 @@ Preprocessor::Preprocessor(IntrusiveRefCntPtr<PreprocessorOptions> PPOpts, ModuleImportExpectsIdentifier(false), CodeCompletionReached(0), MainFileDir(nullptr), SkipMainFilePreamble(0, true), CurPPLexer(nullptr), CurDirLookup(nullptr), CurLexerKind(CLK_Lexer), CurSubmodule(nullptr), - Callbacks(nullptr), MacroArgCache(nullptr), Record(nullptr), + Callbacks(nullptr), CurSubmoduleState(&NullSubmoduleState), + MacroArgCache(nullptr), Record(nullptr), MIChainHead(nullptr), DeserialMIChainHead(nullptr) { OwnsHeaderSearch = OwnsHeaders; @@ -266,7 +267,9 @@ void Preprocessor::PrintStats() { llvm::errs() << "\n Macro Expanded Tokens: " << llvm::capacity_in_bytes(MacroExpandedTokens); llvm::errs() << "\n Predefines Buffer: " << Predefines.capacity(); - llvm::errs() << "\n Macros: " << llvm::capacity_in_bytes(Macros); + // FIXME: List information for all submodules. + llvm::errs() << "\n Macros: " + << llvm::capacity_in_bytes(CurSubmoduleState->Macros); llvm::errs() << "\n #pragma push_macro Info: " << llvm::capacity_in_bytes(PragmaPushMacroInfo); llvm::errs() << "\n Poison Reasons: " @@ -283,14 +286,16 @@ Preprocessor::macro_begin(bool IncludeExternalMacros) const { ExternalSource->ReadDefinedMacros(); } - return Macros.begin(); + return CurSubmoduleState->Macros.begin(); } size_t Preprocessor::getTotalMemory() const { return BP.getTotalMemory() + llvm::capacity_in_bytes(MacroExpandedTokens) + Predefines.capacity() /* Predefines buffer. */ - + llvm::capacity_in_bytes(Macros) + // FIXME: Include sizes from all submodules, and include MacroInfo sizes, + // and ModuleMacros. + + llvm::capacity_in_bytes(CurSubmoduleState->Macros) + llvm::capacity_in_bytes(PragmaPushMacroInfo) + llvm::capacity_in_bytes(PoisonReasons) + llvm::capacity_in_bytes(CommentHandlers); @@ -304,7 +309,7 @@ Preprocessor::macro_end(bool IncludeExternalMacros) const { ExternalSource->ReadDefinedMacros(); } - return Macros.end(); + return CurSubmoduleState->Macros.end(); } /// \brief Compares macro tokens with a specified token value sequence. @@ -321,11 +326,11 @@ StringRef Preprocessor::getLastMacroWithSpelling( StringRef BestSpelling; for (Preprocessor::macro_iterator I = macro_begin(), E = macro_end(); I != E; ++I) { - if (!I->second->getMacroInfo()->isObjectLike()) - continue; const MacroDirective::DefInfo - Def = I->second->findDirectiveAtLoc(Loc, SourceMgr); - if (!Def) + Def = I->second.findDirectiveAtLoc(Loc, SourceMgr); + if (!Def || !Def.getMacroInfo()) + continue; + if (!Def.getMacroInfo()->isObjectLike()) continue; if (!MacroDefinitionEquals(Def.getMacroInfo(), Tokens)) continue; @@ -584,6 +589,23 @@ void Preprocessor::HandlePoisonedIdentifier(Token & Identifier) { Diag(Identifier,it->second) << Identifier.getIdentifierInfo(); } +/// \brief Returns a diagnostic message kind for reporting a future keyword as +/// appropriate for the identifier and specified language. 
+static diag::kind getFutureCompatDiagKind(const IdentifierInfo &II, + const LangOptions &LangOpts) { + assert(II.isFutureCompatKeyword() && "diagnostic should not be needed"); + + if (LangOpts.CPlusPlus) + return llvm::StringSwitch<diag::kind>(II.getName()) +#define CXX11_KEYWORD(NAME, FLAGS) \ + .Case(#NAME, diag::warn_cxx11_keyword) +#include "clang/Basic/TokenKinds.def" + ; + + llvm_unreachable( + "Keyword not known to come from a newer Standard or proposed Standard"); +} + /// HandleIdentifier - This callback is invoked when the lexer reads an /// identifier. This callback looks up the identifier in the map and/or /// potentially macro expands it or turns it into a named token (like 'for'). @@ -622,8 +644,9 @@ bool Preprocessor::HandleIdentifier(Token &Identifier) { } // If this is a macro to be expanded, do it. - if (MacroDirective *MD = getMacroDirective(&II)) { - MacroInfo *MI = MD->getMacroInfo(); + if (MacroDefinition MD = getMacroDefinition(&II)) { + auto *MI = MD.getMacroInfo(); + assert(MI && "macro definition with no macro info?"); if (!DisableMacroExpansion) { if (!Identifier.isExpandDisabled() && MI->isEnabled()) { // C99 6.10.3p10: If the preprocessing token immediately after the @@ -641,15 +664,16 @@ bool Preprocessor::HandleIdentifier(Token &Identifier) { } } - // If this identifier is a keyword in C++11, produce a warning. Don't warn if - // we're not considering macro expansion, since this identifier might be the - // name of a macro. + // If this identifier is a keyword in a newer Standard or proposed Standard, + // produce a warning. Don't warn if we're not considering macro expansion, + // since this identifier might be the name of a macro. // FIXME: This warning is disabled in cases where it shouldn't be, like // "#define constexpr constexpr", "int constexpr;" - if (II.isCXX11CompatKeyword() && !DisableMacroExpansion) { - Diag(Identifier, diag::warn_cxx11_keyword) << II.getName(); + if (II.isFutureCompatKeyword() && !DisableMacroExpansion) { + Diag(Identifier, getFutureCompatDiagKind(II, getLangOpts())) + << II.getName(); // Don't diagnose this keyword again in this translation unit. - II.setIsCXX11CompatKeyword(false); + II.setIsFutureCompatKeyword(false); } // C++ 2.11p2: If this is an alternative representation of a C++ operator, @@ -748,16 +772,36 @@ void Preprocessor::LexAfterModuleImport(Token &Result) { // If we have a non-empty module path, load the named module. if (!ModuleImportPath.empty()) { Module *Imported = nullptr; - if (getLangOpts().Modules) + if (getLangOpts().Modules) { Imported = TheModuleLoader.loadModule(ModuleImportLoc, ModuleImportPath, - Module::MacrosVisible, + Module::Hidden, /*IsIncludeDirective=*/false); + if (Imported) + makeModuleVisible(Imported, ModuleImportLoc); + } if (Callbacks && (getLangOpts().Modules || getLangOpts().DebuggerSupport)) Callbacks->moduleImport(ModuleImportLoc, ModuleImportPath, Imported); } } +void Preprocessor::makeModuleVisible(Module *M, SourceLocation Loc) { + CurSubmoduleState->VisibleModules.setVisible( + M, Loc, [](Module *) {}, + [&](ArrayRef<Module *> Path, Module *Conflict, StringRef Message) { + // FIXME: Include the path in the diagnostic. + // FIXME: Include the import location for the conflicting module. + Diag(ModuleImportLoc, diag::warn_module_conflict) + << Path[0]->getFullModuleName() + << Conflict->getFullModuleName() + << Message; + }); + + // Add this module to the imports list of the currently-built submodule. 
+ if (!BuildingSubmoduleStack.empty() && M != BuildingSubmoduleStack.back().M) + BuildingSubmoduleStack.back().M->Imports.insert(M); +} + bool Preprocessor::FinishLexStringLiteral(Token &Result, std::string &String, const char *DiagnosticTag, bool AllowMacroExpansion) { diff --git a/contrib/llvm/tools/clang/lib/Lex/ScratchBuffer.cpp b/contrib/llvm/tools/clang/lib/Lex/ScratchBuffer.cpp index 3bac889..cd8a27e 100644 --- a/contrib/llvm/tools/clang/lib/Lex/ScratchBuffer.cpp +++ b/contrib/llvm/tools/clang/lib/Lex/ScratchBuffer.cpp @@ -64,12 +64,13 @@ void ScratchBuffer::AllocScratchBuffer(unsigned RequestLen) { if (RequestLen < ScratchBufSize) RequestLen = ScratchBufSize; + // Get scratch buffer. Zero-initialize it so it can be dumped into a PCH file + // deterministically. std::unique_ptr<llvm::MemoryBuffer> OwnBuf = llvm::MemoryBuffer::getNewMemBuffer(RequestLen, "<scratch space>"); llvm::MemoryBuffer &Buf = *OwnBuf; FileID FID = SourceMgr.createFileID(std::move(OwnBuf)); BufferStartLoc = SourceMgr.getLocForStartOfFile(FID); CurBuffer = const_cast<char*>(Buf.getBufferStart()); - BytesUsed = 1; - CurBuffer[0] = '0'; // Start out with a \0 for cleanliness. + BytesUsed = 0; } diff --git a/contrib/llvm/tools/clang/lib/Lex/TokenLexer.cpp b/contrib/llvm/tools/clang/lib/Lex/TokenLexer.cpp index 5f4705e..83efbab 100644 --- a/contrib/llvm/tools/clang/lib/Lex/TokenLexer.cpp +++ b/contrib/llvm/tools/clang/lib/Lex/TokenLexer.cpp @@ -521,6 +521,13 @@ bool TokenLexer::Lex(Token &Tok) { /// are more ## after it, chomp them iteratively. Return the result as Tok. /// If this returns true, the caller should immediately return the token. bool TokenLexer::PasteTokens(Token &Tok) { + // MSVC: If previous token was pasted, this must be a recovery from an invalid + // paste operation. Ignore spaces before this token to mimic MSVC output. + // Required for generating valid UUID strings in some MS headers. + if (PP.getLangOpts().MicrosoftExt && (CurToken >= 2) && + Tokens[CurToken - 2].is(tok::hashhash)) + Tok.clearFlag(Token::LeadingSpace); + SmallString<128> Buffer; const char *ResultTokStrPtr = nullptr; SourceLocation StartLoc = Tok.getLocation(); @@ -637,7 +644,7 @@ bool TokenLexer::PasteTokens(Token &Tok) { // disabling it. PP.Diag(Loc, PP.getLangOpts().MicrosoftExt ? diag::ext_pp_bad_paste_ms : diag::err_pp_bad_paste) - << Buffer.str(); + << Buffer; } // An error has occurred so exit loop. diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseCXXInlineMethods.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseCXXInlineMethods.cpp index 59b491a..5da70d0 100644 --- a/contrib/llvm/tools/clang/lib/Parse/ParseCXXInlineMethods.cpp +++ b/contrib/llvm/tools/clang/lib/Parse/ParseCXXInlineMethods.cpp @@ -26,8 +26,7 @@ NamedDecl *Parser::ParseCXXInlineMethodDef(AccessSpecifier AS, AttributeList *AccessAttrs, ParsingDeclarator &D, const ParsedTemplateInfo &TemplateInfo, - const VirtSpecifiers& VS, - FunctionDefinitionKind DefinitionKind, + const VirtSpecifiers& VS, ExprResult& Init) { assert(D.isFunctionDeclarator() && "This isn't a function declarator!"); assert((Tok.is(tok::l_brace) || Tok.is(tok::colon) || Tok.is(tok::kw_try) || @@ -40,7 +39,6 @@ NamedDecl *Parser::ParseCXXInlineMethodDef(AccessSpecifier AS, TemplateInfo.TemplateParams ? 
TemplateInfo.TemplateParams->size() : 0); NamedDecl *FnD; - D.setFunctionDefinitionKind(DefinitionKind); if (D.getDeclSpec().isFriendSpecified()) FnD = Actions.ActOnFriendFunctionDecl(getCurScope(), D, TemplateParams); @@ -71,17 +69,24 @@ NamedDecl *Parser::ParseCXXInlineMethodDef(AccessSpecifier AS, bool Delete = false; SourceLocation KWLoc; + SourceLocation KWEndLoc = Tok.getEndLoc().getLocWithOffset(-1); if (TryConsumeToken(tok::kw_delete, KWLoc)) { Diag(KWLoc, getLangOpts().CPlusPlus11 ? diag::warn_cxx98_compat_deleted_function : diag::ext_deleted_function); Actions.SetDeclDeleted(FnD, KWLoc); Delete = true; + if (auto *DeclAsFunction = dyn_cast<FunctionDecl>(FnD)) { + DeclAsFunction->setRangeEnd(KWEndLoc); + } } else if (TryConsumeToken(tok::kw_default, KWLoc)) { Diag(KWLoc, getLangOpts().CPlusPlus11 ? diag::warn_cxx98_compat_defaulted_function : diag::ext_defaulted_function); Actions.SetDeclDefaulted(FnD, KWLoc); + if (auto *DeclAsFunction = dyn_cast<FunctionDecl>(FnD)) { + DeclAsFunction->setRangeEnd(KWEndLoc); + } } else { llvm_unreachable("function definition after = not 'delete' or 'default'"); } @@ -97,12 +102,12 @@ NamedDecl *Parser::ParseCXXInlineMethodDef(AccessSpecifier AS, return FnD; } - + // In delayed template parsing mode, if we are within a class template // or if we are about to parse function member template then consume // the tokens and store them for parsing at the end of the translation unit. if (getLangOpts().DelayedTemplateParsing && - DefinitionKind == FDK_Definition && + D.getFunctionDefinitionKind() == FDK_Definition && !D.getDeclSpec().isConstexprSpecified() && !(FnD && FnD->getAsFunction() && FnD->getAsFunction()->getReturnType()->getContainedAutoType()) && @@ -306,9 +311,10 @@ void Parser::ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM) { ParseScope PrototypeScope(this, Scope::FunctionPrototypeScope | Scope::FunctionDeclarationScope | Scope::DeclScope); for (unsigned I = 0, N = LM.DefaultArgs.size(); I != N; ++I) { + auto Param = cast<ParmVarDecl>(LM.DefaultArgs[I].Param); // Introduce the parameter into scope. - Actions.ActOnDelayedCXXMethodParameter(getCurScope(), - LM.DefaultArgs[I].Param); + bool HasUnparsed = Param->hasUnparsedDefaultArg(); + Actions.ActOnDelayedCXXMethodParameter(getCurScope(), Param); if (CachedTokens *Toks = LM.DefaultArgs[I].Toks) { // Mark the end of the default argument so that we know when to stop when // we parse it later on. @@ -316,9 +322,8 @@ void Parser::ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM) { Token DefArgEnd; DefArgEnd.startToken(); DefArgEnd.setKind(tok::eof); - DefArgEnd.setLocation(LastDefaultArgToken.getLocation().getLocWithOffset( - LastDefaultArgToken.getLength())); - DefArgEnd.setEofData(LM.DefaultArgs[I].Param); + DefArgEnd.setLocation(LastDefaultArgToken.getEndLoc()); + DefArgEnd.setEofData(Param); Toks->push_back(DefArgEnd); // Parse the default argument from its saved token stream. @@ -336,7 +341,7 @@ void Parser::ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM) { // used. 
EnterExpressionEvaluationContext Eval(Actions, Sema::PotentiallyEvaluatedIfUsed, - LM.DefaultArgs[I].Param); + Param); ExprResult DefArgResult; if (getLangOpts().CPlusPlus11 && Tok.is(tok::l_brace)) { @@ -346,11 +351,9 @@ void Parser::ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM) { DefArgResult = ParseAssignmentExpression(); DefArgResult = Actions.CorrectDelayedTyposInExpr(DefArgResult); if (DefArgResult.isInvalid()) { - Actions.ActOnParamDefaultArgumentError(LM.DefaultArgs[I].Param, - EqualLoc); + Actions.ActOnParamDefaultArgumentError(Param, EqualLoc); } else { - if (Tok.isNot(tok::eof) || - Tok.getEofData() != LM.DefaultArgs[I].Param) { + if (Tok.isNot(tok::eof) || Tok.getEofData() != Param) { // The last two tokens are the terminator and the saved value of // Tok; the last token in the default argument is the one before // those. @@ -359,7 +362,7 @@ void Parser::ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM) { << SourceRange(Tok.getLocation(), (*Toks)[Toks->size() - 3].getLocation()); } - Actions.ActOnParamDefaultArgument(LM.DefaultArgs[I].Param, EqualLoc, + Actions.ActOnParamDefaultArgument(Param, EqualLoc, DefArgResult.get()); } @@ -368,11 +371,21 @@ void Parser::ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM) { while (Tok.isNot(tok::eof)) ConsumeAnyToken(); - if (Tok.is(tok::eof) && Tok.getEofData() == LM.DefaultArgs[I].Param) + if (Tok.is(tok::eof) && Tok.getEofData() == Param) ConsumeAnyToken(); delete Toks; LM.DefaultArgs[I].Toks = nullptr; + } else if (HasUnparsed) { + assert(Param->hasInheritedDefaultArg()); + FunctionDecl *Old = cast<FunctionDecl>(LM.Method)->getPreviousDecl(); + ParmVarDecl *OldParam = Old->getParamDecl(I); + assert (!OldParam->hasUnparsedDefaultArg()); + if (OldParam->hasUninstantiatedDefaultArg()) + Param->setUninstantiatedDefaultArg( + OldParam->getUninstantiatedDefaultArg()); + else + Param->setDefaultArg(OldParam->getInit()); } } @@ -383,9 +396,7 @@ void Parser::ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM) { Token ExceptionSpecEnd; ExceptionSpecEnd.startToken(); ExceptionSpecEnd.setKind(tok::eof); - ExceptionSpecEnd.setLocation( - LastExceptionSpecToken.getLocation().getLocWithOffset( - LastExceptionSpecToken.getLength())); + ExceptionSpecEnd.setLocation(LastExceptionSpecToken.getEndLoc()); ExceptionSpecEnd.setEofData(LM.Method); Toks->push_back(ExceptionSpecEnd); @@ -490,8 +501,7 @@ void Parser::ParseLexedMethodDef(LexedMethod &LM) { Token BodyEnd; BodyEnd.startToken(); BodyEnd.setKind(tok::eof); - BodyEnd.setLocation( - LastBodyToken.getLocation().getLocWithOffset(LastBodyToken.getLength())); + BodyEnd.setLocation(LastBodyToken.getEndLoc()); BodyEnd.setEofData(LM.D); LM.Toks.push_back(BodyEnd); // Append the current token at the end of the new token stream so that it diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseDecl.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseDecl.cpp index 4d05e16..bd114d7 100644 --- a/contrib/llvm/tools/clang/lib/Parse/ParseDecl.cpp +++ b/contrib/llvm/tools/clang/lib/Parse/ParseDecl.cpp @@ -529,64 +529,72 @@ bool Parser::ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName, /// [MS] extended-decl-modifier-seq: /// extended-decl-modifier[opt] /// extended-decl-modifier extended-decl-modifier-seq -void Parser::ParseMicrosoftDeclSpec(ParsedAttributes &Attrs) { +void Parser::ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs, + SourceLocation *End) { + assert((getLangOpts().MicrosoftExt || getLangOpts().Borland || + getLangOpts().CUDA) && + "Incorrect language options for
parsing __declspec"); assert(Tok.is(tok::kw___declspec) && "Not a declspec!"); - ConsumeToken(); - BalancedDelimiterTracker T(*this, tok::l_paren); - if (T.expectAndConsume(diag::err_expected_lparen_after, "__declspec", - tok::r_paren)) - return; - - // An empty declspec is perfectly legal and should not warn. Additionally, - // you can specify multiple attributes per declspec. - while (Tok.isNot(tok::r_paren)) { - // Attribute not present. - if (TryConsumeToken(tok::comma)) - continue; - - // We expect either a well-known identifier or a generic string. Anything - // else is a malformed declspec. - bool IsString = Tok.getKind() == tok::string_literal ? true : false; - if (!IsString && Tok.getKind() != tok::identifier && - Tok.getKind() != tok::kw_restrict) { - Diag(Tok, diag::err_ms_declspec_type); - T.skipToEnd(); + while (Tok.is(tok::kw___declspec)) { + ConsumeToken(); + BalancedDelimiterTracker T(*this, tok::l_paren); + if (T.expectAndConsume(diag::err_expected_lparen_after, "__declspec", + tok::r_paren)) return; - } - IdentifierInfo *AttrName; - SourceLocation AttrNameLoc; - if (IsString) { - SmallString<8> StrBuffer; - bool Invalid = false; - StringRef Str = PP.getSpelling(Tok, StrBuffer, &Invalid); - if (Invalid) { + // An empty declspec is perfectly legal and should not warn. Additionally, + // you can specify multiple attributes per declspec. + while (Tok.isNot(tok::r_paren)) { + // Attribute not present. + if (TryConsumeToken(tok::comma)) + continue; + + // We expect either a well-known identifier or a generic string. Anything + // else is a malformed declspec. + bool IsString = Tok.getKind() == tok::string_literal; + if (!IsString && Tok.getKind() != tok::identifier && + Tok.getKind() != tok::kw_restrict) { + Diag(Tok, diag::err_ms_declspec_type); T.skipToEnd(); return; } - AttrName = PP.getIdentifierInfo(Str); - AttrNameLoc = ConsumeStringToken(); - } else { - AttrName = Tok.getIdentifierInfo(); - AttrNameLoc = ConsumeToken(); - } - bool AttrHandled = false; + IdentifierInfo *AttrName; + SourceLocation AttrNameLoc; + if (IsString) { + SmallString<8> StrBuffer; + bool Invalid = false; + StringRef Str = PP.getSpelling(Tok, StrBuffer, &Invalid); + if (Invalid) { + T.skipToEnd(); + return; + } + AttrName = PP.getIdentifierInfo(Str); + AttrNameLoc = ConsumeStringToken(); + } else { + AttrName = Tok.getIdentifierInfo(); + AttrNameLoc = ConsumeToken(); + } + + bool AttrHandled = false; - // Parse attribute arguments. - if (Tok.is(tok::l_paren)) - AttrHandled = ParseMicrosoftDeclSpecArgs(AttrName, AttrNameLoc, Attrs); - else if (AttrName->getName() == "property") - // The property attribute must have an argument list. - Diag(Tok.getLocation(), diag::err_expected_lparen_after) - << AttrName->getName(); + // Parse attribute arguments. + if (Tok.is(tok::l_paren)) + AttrHandled = ParseMicrosoftDeclSpecArgs(AttrName, AttrNameLoc, Attrs); + else if (AttrName->getName() == "property") + // The property attribute must have an argument list. 
+ Diag(Tok.getLocation(), diag::err_expected_lparen_after) + << AttrName->getName(); - if (!AttrHandled) - Attrs.addNew(AttrName, AttrNameLoc, nullptr, AttrNameLoc, nullptr, 0, - AttributeList::AS_Declspec); + if (!AttrHandled) + Attrs.addNew(AttrName, AttrNameLoc, nullptr, AttrNameLoc, nullptr, 0, + AttributeList::AS_Declspec); + } + T.consumeClose(); + if (End) + *End = T.getCloseLocation(); } - T.consumeClose(); } void Parser::ParseMicrosoftTypeAttributes(ParsedAttributes &attrs) { @@ -1360,6 +1368,46 @@ void Parser::ProhibitCXX11Attributes(ParsedAttributesWithRange &attrs) { } } +// As an exception to the rule, __declspec(align(...)) before the +// class-key affects the type instead of the variable. +void Parser::handleDeclspecAlignBeforeClassKey(ParsedAttributesWithRange &Attrs, + DeclSpec &DS, + Sema::TagUseKind TUK) { + if (TUK == Sema::TUK_Reference) + return; + + ParsedAttributes &PA = DS.getAttributes(); + AttributeList *AL = PA.getList(); + AttributeList *Prev = nullptr; + while (AL) { + AttributeList *Next = AL->getNext(); + + // We only consider attributes using the appropriate '__declspec' spelling, + // this behavior doesn't extend to any other spellings. + if (AL->getKind() == AttributeList::AT_Aligned && + AL->isDeclspecAttribute()) { + // Stitch the attribute into the tag's attribute list. + AL->setNext(nullptr); + Attrs.add(AL); + + // Remove the attribute from the variable's attribute list. + if (Prev) { + // Set the last variable attribute's next attribute to be the attribute + // after the current one. + Prev->setNext(Next); + } else { + // Removing the head of the list requires us to reset the head to the + // next attribute. + PA.set(Next); + } + } else { + Prev = AL; + } + + AL = Next; + } +} + /// ParseDeclaration - Parse a full 'declaration', which consists of /// declaration-specifiers, some number of declarators, and a semicolon. /// 'Context' should be a Declarator::TheContext value. This returns the @@ -1691,11 +1739,12 @@ Parser::DeclGroupPtrTy Parser::ParseDeclGroup(ParsingDeclSpec &DS, } if (isDeclarationSpecifier()) { - // If there is an invalid declaration specifier right after the function - // prototype, then we must be in a missing semicolon case where this isn't - // actually a body. Just fall through into the code that handles it as a - // prototype, and let the top-level code handle the erroneous declspec - // where it would otherwise expect a comma or semicolon. + // If there is an invalid declaration specifier right after the + // function prototype, then we must be in a missing semicolon case + // where this isn't actually a body. Just fall through into the code + // that handles it as a prototype, and let the top-level code handle + // the erroneous declspec where it would otherwise expect a comma or + // semicolon. 
} else { Diag(Tok, diag::err_expected_fn_body); SkipUntil(tok::semi); @@ -1993,7 +2042,11 @@ Decl *Parser::ParseDeclarationAfterDeclaratorAndAttributes( Actions.ActOnCXXEnterDeclInitializer(getCurScope(), ThisDecl); } - if (ParseExpressionList(Exprs, CommaLocs)) { + if (ParseExpressionList(Exprs, CommaLocs, [&] { + Actions.CodeCompleteConstructor(getCurScope(), + cast<VarDecl>(ThisDecl)->getType()->getCanonicalTypeInternal(), + ThisDecl->getLocation(), Exprs); + })) { Actions.ActOnInitializerError(ThisDecl); SkipUntil(tok::r_paren, StopAtSemi); @@ -2564,7 +2617,8 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS, bool EnteringContext = (DSContext == DSC_class || DSContext == DSC_top_level); bool AttrsLastTime = false; ParsedAttributesWithRange attrs(AttrFactory); - const PrintingPolicy &Policy = Actions.getASTContext().getPrintingPolicy(); + // We use Sema's policy to get bool macros right. + const PrintingPolicy &Policy = Actions.getPrintingPolicy(); while (1) { bool isInvalid = false; bool isStorageClass = false; @@ -2871,9 +2925,9 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS, Tok.getLocation(), getCurScope()); // MSVC: If we weren't able to parse a default template argument, and it's - // just a simple identifier, create a DependentNameType. This will allow us - // to defer the name lookup to template instantiation time, as long we forge a - // NestedNameSpecifier for the current context. + // just a simple identifier, create a DependentNameType. This will allow + // us to defer the name lookup to template instantiation time, as long we + // forge a NestedNameSpecifier for the current context. if (!TypeRep && DSContext == DSC_template_type_arg && getLangOpts().MSVCCompat && getCurScope()->isTemplateParamScope()) { TypeRep = Actions.ActOnDelayedDefaultTemplateArg( @@ -2950,7 +3004,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS, // Microsoft declspec support. case tok::kw___declspec: - ParseMicrosoftDeclSpec(DS.getAttributes()); + ParseMicrosoftDeclSpecs(DS.getAttributes()); continue; // Microsoft single token adornments. @@ -3594,10 +3648,7 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS, ParsedAttributesWithRange attrs(AttrFactory); MaybeParseGNUAttributes(attrs); MaybeParseCXX11Attributes(attrs); - - // If declspecs exist after tag, parse them. - while (Tok.is(tok::kw___declspec)) - ParseMicrosoftDeclSpec(attrs); + MaybeParseMicrosoftDeclSpecs(attrs); SourceLocation ScopedEnumKWLoc; bool IsScopedUsingClassTag = false; @@ -3616,8 +3667,7 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS, // They are allowed afterwards, though. MaybeParseGNUAttributes(attrs); MaybeParseCXX11Attributes(attrs); - while (Tok.is(tok::kw___declspec)) - ParseMicrosoftDeclSpec(attrs); + MaybeParseMicrosoftDeclSpecs(attrs); } // C++11 [temp.explicit]p12: @@ -3644,11 +3694,12 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS, // if a fixed underlying type is allowed. ColonProtectionRAIIObject X(*this, AllowFixedUnderlyingType); - if (ParseOptionalCXXScopeSpecifier(SS, ParsedType(), + CXXScopeSpec Spec; + if (ParseOptionalCXXScopeSpecifier(Spec, ParsedType(), /*EnteringContext=*/true)) return; - if (SS.isSet() && Tok.isNot(tok::identifier)) { + if (Spec.isSet() && Tok.isNot(tok::identifier)) { Diag(Tok, diag::err_expected) << tok::identifier; if (Tok.isNot(tok::l_brace)) { // Has no name and is not a definition. 
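handleDeclspecAlignBeforeClassKey, defined above and invoked from the tag-parsing paths below, implements the MSVC rule that a __declspec(align(...)) written before the class-key appertains to the type rather than to the declared variable (declarations that merely reference the tag, Sema::TUK_Reference, are left alone). A small illustration of the resulting semantics, as a sketch assuming -fms-extensions and C++11; the names are invented:

// Written before the class-key, the alignment declspec is stitched onto the
// tag's attribute list, so every Packet is 16-byte aligned, not just 'first'.
__declspec(align(16)) struct Packet { char payload[3]; } first;

struct Packet second; // also 16-byte aligned

static_assert(__alignof(Packet) == 16,
              "align() before the class-key appertains to the type");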
@@ -3657,6 +3708,8 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS, return; } } + + SS = Spec; } // Must have either 'enum name' or 'enum {...}'. @@ -3842,6 +3895,15 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS, return; } + handleDeclspecAlignBeforeClassKey(attrs, DS, TUK); + + Sema::SkipBodyInfo SkipBody; + if (!Name && TUK == Sema::TUK_Definition && Tok.is(tok::l_brace) && + NextToken().is(tok::identifier)) + SkipBody = Actions.shouldSkipAnonEnumBody(getCurScope(), + NextToken().getIdentifierInfo(), + NextToken().getLocation()); + bool Owned = false; bool IsDependent = false; const char *PrevSpec = nullptr; @@ -3851,7 +3913,22 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS, AS, DS.getModulePrivateSpecLoc(), TParams, Owned, IsDependent, ScopedEnumKWLoc, IsScopedUsingClassTag, BaseType, - DSC == DSC_type_specifier); + DSC == DSC_type_specifier, &SkipBody); + + if (SkipBody.ShouldSkip) { + assert(TUK == Sema::TUK_Definition && "can only skip a definition"); + + BalancedDelimiterTracker T(*this, tok::l_brace); + T.consumeOpen(); + T.skipToEnd(); + + if (DS.SetTypeSpecType(DeclSpec::TST_enum, StartLoc, + NameLoc.isValid() ? NameLoc : StartLoc, + PrevSpec, DiagID, TagDecl, Owned, + Actions.getASTContext().getPrintingPolicy())) + Diag(StartLoc, DiagID) << PrevSpec; + return; + } if (IsDependent) { // This enum has a dependent nested-name-specifier. Handle it as a @@ -3923,6 +4000,7 @@ void Parser::ParseEnumBody(SourceLocation StartLoc, Decl *EnumDecl) { Diag(Tok, diag::error_empty_enum); SmallVector<Decl *, 32> EnumConstantDecls; + SmallVector<SuppressAccessChecks, 32> EnumAvailabilityDiags; Decl *LastEnumConstDecl = nullptr; @@ -3953,7 +4031,7 @@ void Parser::ParseEnumBody(SourceLocation StartLoc, Decl *EnumDecl) { SourceLocation EqualLoc; ExprResult AssignedVal; - ParsingDeclRAIIObject PD(*this, ParsingDeclRAIIObject::NoParent); + EnumAvailabilityDiags.emplace_back(*this); if (TryConsumeToken(tok::equal, EqualLoc)) { AssignedVal = ParseConstantExpression(); @@ -3967,7 +4045,7 @@ void Parser::ParseEnumBody(SourceLocation StartLoc, Decl *EnumDecl) { IdentLoc, Ident, attrs.getList(), EqualLoc, AssignedVal.get()); - PD.complete(EnumConstDecl); + EnumAvailabilityDiags.back().done(); EnumConstantDecls.push_back(EnumConstDecl); LastEnumConstDecl = EnumConstDecl; @@ -4023,6 +4101,14 @@ void Parser::ParseEnumBody(SourceLocation StartLoc, Decl *EnumDecl) { getCurScope(), attrs.getList()); + // Now handle enum constant availability diagnostics. 
+ assert(EnumConstantDecls.size() == EnumAvailabilityDiags.size()); + for (size_t i = 0, e = EnumConstantDecls.size(); i != e; ++i) { + ParsingDeclRAIIObject PD(*this, ParsingDeclRAIIObject::NoParent); + EnumAvailabilityDiags[i].redelay(); + PD.complete(EnumConstantDecls[i]); + } + EnumScope.Exit(); Actions.ActOnTagFinishDefinition(getCurScope(), EnumDecl, T.getCloseLocation()); @@ -4773,7 +4859,8 @@ void Parser::ParseDeclaratorInternal(Declarator &D, D.AddTypeInfo(DeclaratorChunk::getPointer(DS.getTypeQualifiers(), Loc, DS.getConstSpecLoc(), DS.getVolatileSpecLoc(), - DS.getRestrictSpecLoc()), + DS.getRestrictSpecLoc(), + DS.getAtomicSpecLoc()), DS.getAttributes(), SourceLocation()); else @@ -4916,7 +5003,8 @@ void Parser::ParseDirectDeclarator(Declarator &D) { } if (D.getCXXScopeSpec().isValid()) { - if (Actions.ShouldEnterDeclaratorScope(getCurScope(), D.getCXXScopeSpec())) + if (Actions.ShouldEnterDeclaratorScope(getCurScope(), + D.getCXXScopeSpec())) // Change the declaration context for name lookup, until this function // is exited (and the declarator has been parsed). DeclScopeObj.EnterDeclaratorScope(); @@ -4968,6 +5056,7 @@ void Parser::ParseDirectDeclarator(Declarator &D) { AllowConstructorName = (D.getContext() == Declarator::MemberContext); SourceLocation TemplateKWLoc; + bool HadScope = D.getCXXScopeSpec().isValid(); if (ParseUnqualifiedId(D.getCXXScopeSpec(), /*EnteringContext=*/true, /*AllowDestructorName=*/true, @@ -4981,6 +5070,13 @@ void Parser::ParseDirectDeclarator(Declarator &D) { D.SetIdentifier(nullptr, Tok.getLocation()); D.setInvalidType(true); } else { + // ParseUnqualifiedId might have parsed a scope specifier during error + // recovery. If it did so, enter that scope. + if (!HadScope && D.getCXXScopeSpec().isValid() && + Actions.ShouldEnterDeclaratorScope(getCurScope(), + D.getCXXScopeSpec())) + DeclScopeObj.EnterDeclaratorScope(); + // Parsed the unqualified-id; update range information and move along. if (D.getSourceRange().getBegin().isInvalid()) D.SetRangeBegin(D.getName().getSourceRange().getBegin()); @@ -5022,7 +5118,8 @@ void Parser::ParseDirectDeclarator(Declarator &D) { // If there was an error parsing parenthesized declarator, declarator // scope may have been entered before. Don't do it again. if (!D.isInvalidType() && - Actions.ShouldEnterDeclaratorScope(getCurScope(), D.getCXXScopeSpec())) + Actions.ShouldEnterDeclaratorScope(getCurScope(), + D.getCXXScopeSpec())) // Change the declaration context for name lookup, until this function // is exited (and the declarator has been parsed). DeclScopeObj.EnterDeclaratorScope(); @@ -5310,7 +5407,7 @@ void Parser::ParseFunctionDeclarator(Declarator &D, if (getLangOpts().CPlusPlus) { // FIXME: Accept these components in any order, and produce fixits to // correct the order if the user gets it wrong. Ideally we should deal - // with the virt-specifier-seq and pure-specifier in the same way. + // with the pure-specifier in the same way. // Parse cv-qualifier-seq[opt]. ParseTypeQualifierListOpt(DS, AR_NoAttributesParsed, @@ -5323,15 +5420,8 @@ void Parser::ParseFunctionDeclarator(Declarator &D, } // Parse ref-qualifier[opt]. - if (Tok.is(tok::amp) || Tok.is(tok::ampamp)) { - Diag(Tok, getLangOpts().CPlusPlus11 ? 
- diag::warn_cxx98_compat_ref_qualifier : - diag::ext_ref_qualifier); - - RefQualifierIsLValueRef = Tok.is(tok::amp); - RefQualifierLoc = ConsumeToken(); + if (ParseRefQualifier(RefQualifierIsLValueRef, RefQualifierLoc)) EndLoc = RefQualifierLoc; - } // C++11 [expr.prim.general]p3: // If a declaration declares a member function or member function @@ -5427,6 +5517,22 @@ void Parser::ParseFunctionDeclarator(Declarator &D, FnAttrs, EndLoc); } +/// ParseRefQualifier - Parses a member function ref-qualifier. Returns +/// true if a ref-qualifier is found. +bool Parser::ParseRefQualifier(bool &RefQualifierIsLValueRef, + SourceLocation &RefQualifierLoc) { + if (Tok.is(tok::amp) || Tok.is(tok::ampamp)) { + Diag(Tok, getLangOpts().CPlusPlus11 ? + diag::warn_cxx98_compat_ref_qualifier : + diag::ext_ref_qualifier); + + RefQualifierIsLValueRef = Tok.is(tok::amp); + RefQualifierLoc = ConsumeToken(); + return true; + } + return false; +} + /// isFunctionDeclaratorIdentifierList - This parameter list may have an /// identifier list form for a K&R-style function: void foo(a,b,c) /// diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseDeclCXX.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseDeclCXX.cpp index 87d9909..53e4a41 100644 --- a/contrib/llvm/tools/clang/lib/Parse/ParseDeclCXX.cpp +++ b/contrib/llvm/tools/clang/lib/Parse/ParseDeclCXX.cpp @@ -558,6 +558,7 @@ Decl *Parser::ParseUsingDeclaration(unsigned Context, // Maybe this is an alias-declaration. TypeResult TypeAlias; bool IsAliasDecl = Tok.is(tok::equal); + Decl *DeclFromDeclSpec = nullptr; if (IsAliasDecl) { // If we had any misplaced attributes from earlier, this is where they // should have been written. @@ -612,10 +613,12 @@ Decl *Parser::ParseUsingDeclaration(unsigned Context, Diag(SS.getBeginLoc(), diag::err_alias_declaration_not_identifier) << FixItHint::CreateRemoval(SS.getRange()); - TypeAlias = ParseTypeName(nullptr, TemplateInfo.Kind ? - Declarator::AliasTemplateContext : - Declarator::AliasDeclContext, AS, OwnedType, - &Attrs); + TypeAlias = ParseTypeName(nullptr, TemplateInfo.Kind + ? Declarator::AliasTemplateContext + : Declarator::AliasDeclContext, + AS, &DeclFromDeclSpec, &Attrs); + if (OwnedType) + *OwnedType = DeclFromDeclSpec; } else { // C++11 attributes are not allowed on a using-declaration, but GNU ones // are. @@ -664,7 +667,7 @@ Decl *Parser::ParseUsingDeclaration(unsigned Context, TemplateParams ? TemplateParams->size() : 0); return Actions.ActOnAliasDeclaration(getCurScope(), AS, TemplateParamsArg, UsingLoc, Name, Attrs.getList(), - TypeAlias); + TypeAlias, DeclFromDeclSpec); } return Actions.ActOnUsingDeclaration(getCurScope(), AS, @@ -796,7 +799,10 @@ SourceLocation Parser::ParseDecltypeSpecifier(DeclSpec &DS) { // The operand of the decltype specifier is an unevaluated operand. EnterExpressionEvaluationContext Unevaluated(Actions, Sema::Unevaluated, nullptr,/*IsDecltype=*/true); - Result = Actions.CorrectDelayedTyposInExpr(ParseExpression()); + Result = + Actions.CorrectDelayedTyposInExpr(ParseExpression(), [](Expr *E) { + return E->hasPlaceholderType() ? ExprError() : E; + }); if (Result.isInvalid()) { DS.SetTypeSpecError(); if (SkipUntil(tok::r_paren, StopAtSemi | StopBeforeMatch)) { @@ -1223,10 +1229,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind, ParsedAttributesWithRange attrs(AttrFactory); // If attributes exist after tag, parse them. MaybeParseGNUAttributes(attrs); - - // If declspecs exist after tag, parse them. 
- while (Tok.is(tok::kw___declspec)) - ParseMicrosoftDeclSpec(attrs); + MaybeParseMicrosoftDeclSpecs(attrs); // Parse inheritance specifiers. if (Tok.is(tok::kw___single_inheritance) || @@ -1310,11 +1313,19 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind, // is a base-specifier-list. ColonProtectionRAIIObject X(*this); - if (ParseOptionalCXXScopeSpecifier(SS, ParsedType(), EnteringContext)) + CXXScopeSpec Spec; + bool HasValidSpec = true; + if (ParseOptionalCXXScopeSpecifier(Spec, ParsedType(), EnteringContext)) { DS.SetTypeSpecError(); - if (SS.isSet()) - if (Tok.isNot(tok::identifier) && Tok.isNot(tok::annot_template_id)) + HasValidSpec = false; + } + if (Spec.isSet()) + if (Tok.isNot(tok::identifier) && Tok.isNot(tok::annot_template_id)) { Diag(Tok, diag::err_expected) << tok::identifier; + HasValidSpec = false; + } + if (HasValidSpec) + SS = Spec; } TemplateParameterLists *TemplateParams = TemplateInfo.TemplateParams; @@ -1539,6 +1550,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind, TypeResult TypeResult = true; // invalid bool Owned = false; + Sema::SkipBodyInfo SkipBody; if (TemplateId) { // Explicit specialization, class template partial specialization, // or explicit instantiation. @@ -1625,7 +1637,8 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind, *TemplateId, attrs.getList(), MultiTemplateParamsArg(TemplateParams ? &(*TemplateParams)[0] : nullptr, - TemplateParams ? TemplateParams->size() : 0)); + TemplateParams ? TemplateParams->size() : 0), + &SkipBody); } } else if (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation && TUK == Sema::TUK_Declaration) { @@ -1677,6 +1690,8 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind, TParams = MultiTemplateParamsArg(&(*TemplateParams)[0], TemplateParams->size()); + handleDeclspecAlignBeforeClassKey(attrs, DS, TUK); + // Declaration or definition of a class type TagOrTempResult = Actions.ActOnTag(getCurScope(), TagType, TUK, StartLoc, SS, Name, NameLoc, attrs.getList(), AS, @@ -1684,7 +1699,8 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind, TParams, Owned, IsDependent, SourceLocation(), false, clang::TypeResult(), - DSC == DSC_type_specifier); + DSC == DSC_type_specifier, + &SkipBody); // If ActOnTag said the type was dependent, try again with the // less common call. @@ -1700,7 +1716,10 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind, assert(Tok.is(tok::l_brace) || (getLangOpts().CPlusPlus && Tok.is(tok::colon)) || isCXX11FinalKeyword()); - if (getLangOpts().CPlusPlus) + if (SkipBody.ShouldSkip) + SkipCXXMemberSpecification(StartLoc, AttrFixitLoc, TagType, + TagOrTempResult.get()); + else if (getLangOpts().CPlusPlus) ParseCXXMemberSpecification(StartLoc, AttrFixitLoc, attrs, TagType, TagOrTempResult.get()); else @@ -1882,51 +1901,40 @@ AccessSpecifier Parser::getAccessSpecifierIfPresent() const { /// the class definition. void Parser::HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo, Decl *ThisDecl) { - // We just declared a member function. If this member function - // has any default arguments or an exception-specification, we'll need to - // parse them later. 
- LateParsedMethodDeclaration *LateMethod = nullptr; DeclaratorChunk::FunctionTypeInfo &FTI = DeclaratorInfo.getFunctionTypeInfo(); + // If there was a late-parsed exception-specification, we'll need a + // late parse. + bool NeedLateParse = FTI.getExceptionSpecType() == EST_Unparsed; + + if (!NeedLateParse) { + // Look ahead to see if there are any default args + for (unsigned ParamIdx = 0; ParamIdx < FTI.NumParams; ++ParamIdx) { + auto Param = cast<ParmVarDecl>(FTI.Params[ParamIdx].Param); + if (Param->hasUnparsedDefaultArg()) { + NeedLateParse = true; + break; + } + } + } - // If there was a late-parsed exception-specification, hold onto its tokens. - if (FTI.getExceptionSpecType() == EST_Unparsed) { + if (NeedLateParse) { // Push this method onto the stack of late-parsed method // declarations. - LateMethod = new LateParsedMethodDeclaration(this, ThisDecl); + auto LateMethod = new LateParsedMethodDeclaration(this, ThisDecl); getCurrentClass().LateParsedDeclarations.push_back(LateMethod); LateMethod->TemplateScope = getCurScope()->isTemplateParamScope(); - // Stash the exception-specification tokens in the late-pased mthod. + // Stash the exception-specification tokens in the late-parsed method. LateMethod->ExceptionSpecTokens = FTI.ExceptionSpecTokens; FTI.ExceptionSpecTokens = 0; - // Reserve space for the parameters. + // Push tokens for each parameter. Those that do not have + // defaults will be NULL. LateMethod->DefaultArgs.reserve(FTI.NumParams); - } - - for (unsigned ParamIdx = 0; ParamIdx < FTI.NumParams; ++ParamIdx) { - if (LateMethod || FTI.Params[ParamIdx].DefaultArgTokens) { - if (!LateMethod) { - // Push this method onto the stack of late-parsed method - // declarations. - LateMethod = new LateParsedMethodDeclaration(this, ThisDecl); - getCurrentClass().LateParsedDeclarations.push_back(LateMethod); - LateMethod->TemplateScope = getCurScope()->isTemplateParamScope(); - - // Add all of the parameters prior to this one (they don't - // have default arguments). - LateMethod->DefaultArgs.reserve(FTI.NumParams); - for (unsigned I = 0; I < ParamIdx; ++I) - LateMethod->DefaultArgs.push_back( - LateParsedDefaultArgument(FTI.Params[I].Param)); - } - - // Add this parameter to the list of parameters (it may or may - // not have a default argument). + for (unsigned ParamIdx = 0; ParamIdx < FTI.NumParams; ++ParamIdx) LateMethod->DefaultArgs.push_back(LateParsedDefaultArgument( - FTI.Params[ParamIdx].Param, FTI.Params[ParamIdx].DefaultArgTokens)); - } + FTI.Params[ParamIdx].Param, FTI.Params[ParamIdx].DefaultArgTokens)); } } @@ -2019,7 +2027,7 @@ bool Parser::isCXX11FinalKeyword() const { /// \brief Parse a C++ member-declarator up to, but not including, the optional /// brace-or-equal-initializer or pure-specifier. -void Parser::ParseCXXMemberDeclaratorBeforeInitializer( +bool Parser::ParseCXXMemberDeclaratorBeforeInitializer( Declarator &DeclaratorInfo, VirtSpecifiers &VS, ExprResult &BitfieldSize, LateParsedAttrList &LateParsedAttrs) { // member-declarator: @@ -2037,10 +2045,13 @@ void Parser::ParseCXXMemberDeclaratorBeforeInitializer( BitfieldSize = ParseConstantExpression(); if (BitfieldSize.isInvalid()) SkipUntil(tok::comma, StopAtSemi | StopBeforeMatch); - } else + } else { ParseOptionalCXX11VirtSpecifierSeq( VS, getCurrentClass().IsInterface, DeclaratorInfo.getDeclSpec().getFriendSpecLoc()); + if (!VS.isUnset()) + MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(DeclaratorInfo, VS); + } // If a simple-asm-expr is present, parse it.
if (Tok.is(tok::kw_asm)) { @@ -2071,6 +2082,78 @@ void Parser::ParseCXXMemberDeclaratorBeforeInitializer( Diag(Attr->getLoc(), diag::warn_gcc_attribute_location); Attr = Attr->getNext(); } + MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(DeclaratorInfo, VS); + } + } + + // If this has neither a name nor a bit width, something has gone seriously + // wrong. Skip until the semi-colon or }. + if (!DeclaratorInfo.hasName() && BitfieldSize.isUnset()) { + // If so, skip until the semi-colon or a }. + SkipUntil(tok::r_brace, StopAtSemi | StopBeforeMatch); + return true; + } + return false; +} + +/// \brief Look for declaration specifiers possibly occurring after C++11 +/// virt-specifier-seq and diagnose them. +void Parser::MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq( + Declarator &D, + VirtSpecifiers &VS) { + DeclSpec DS(AttrFactory); + + // GNU-style and C++11 attributes are not allowed here, but they will be + // handled by the caller. Diagnose everything else. + ParseTypeQualifierListOpt(DS, AR_NoAttributesParsed, false); + D.ExtendWithDeclSpec(DS); + + if (D.isFunctionDeclarator()) { + auto &Function = D.getFunctionTypeInfo(); + if (DS.getTypeQualifiers() != DeclSpec::TQ_unspecified) { + auto DeclSpecCheck = [&] (DeclSpec::TQ TypeQual, + const char *FixItName, + SourceLocation SpecLoc, + unsigned* QualifierLoc) { + FixItHint Insertion; + if (DS.getTypeQualifiers() & TypeQual) { + if (!(Function.TypeQuals & TypeQual)) { + std::string Name(FixItName); + Name += " "; + Insertion = FixItHint::CreateInsertion(VS.getFirstLocation(), Name.c_str()); + Function.TypeQuals |= TypeQual; + *QualifierLoc = SpecLoc.getRawEncoding(); + } + Diag(SpecLoc, diag::err_declspec_after_virtspec) + << FixItName + << VirtSpecifiers::getSpecifierName(VS.getLastSpecifier()) + << FixItHint::CreateRemoval(SpecLoc) + << Insertion; + } + }; + DeclSpecCheck(DeclSpec::TQ_const, "const", DS.getConstSpecLoc(), + &Function.ConstQualifierLoc); + DeclSpecCheck(DeclSpec::TQ_volatile, "volatile", DS.getVolatileSpecLoc(), + &Function.VolatileQualifierLoc); + DeclSpecCheck(DeclSpec::TQ_restrict, "restrict", DS.getRestrictSpecLoc(), + &Function.RestrictQualifierLoc); + } + + // Parse ref-qualifiers. + bool RefQualifierIsLValueRef = true; + SourceLocation RefQualifierLoc; + if (ParseRefQualifier(RefQualifierIsLValueRef, RefQualifierLoc)) { + const char *Name = (RefQualifierIsLValueRef ? "& " : "&& "); + FixItHint Insertion = FixItHint::CreateInsertion(VS.getFirstLocation(), Name); + Function.RefQualifierIsLValueRef = RefQualifierIsLValueRef; + Function.RefQualifierLoc = RefQualifierLoc.getRawEncoding(); + + Diag(RefQualifierLoc, diag::err_declspec_after_virtspec) + << (RefQualifierIsLValueRef ? "&" : "&&") + << VirtSpecifiers::getSpecifierName(VS.getLastSpecifier()) + << FixItHint::CreateRemoval(RefQualifierLoc) + << Insertion; + D.SetRangeEnd(RefQualifierLoc); } } } @@ -2298,14 +2381,8 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS, bool ExpectSemi = true; // Parse the first declarator. - ParseCXXMemberDeclaratorBeforeInitializer(DeclaratorInfo, VS, BitfieldSize, - LateParsedAttrs); - - // If this has neither a name nor a bit width, something has gone seriously - // wrong. Skip until the semi-colon or }. - if (!DeclaratorInfo.hasName() && BitfieldSize.isUnset()) { - // If so, skip until the semi-colon or a }. 
- SkipUntil(tok::r_brace, StopAtSemi | StopBeforeMatch); + if (ParseCXXMemberDeclaratorBeforeInitializer( + DeclaratorInfo, VS, BitfieldSize, LateParsedAttrs)) { TryConsumeToken(tok::semi); return; } @@ -2344,6 +2421,7 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS, DefinitionKind = FDK_Deleted; } } + DeclaratorInfo.setFunctionDefinitionKind(DefinitionKind); // C++11 [dcl.attr.grammar] p4: If an attribute-specifier-seq appertains // to a friend declaration, that declaration shall be a definition. @@ -2354,7 +2432,7 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS, ProhibitAttributes(FnAttrs); } - if (DefinitionKind) { + if (DefinitionKind != FDK_Declaration) { if (!DeclaratorInfo.isFunctionDeclarator()) { Diag(DeclaratorInfo.getIdentifierLoc(), diag::err_func_def_no_params); ConsumeBrace(); @@ -2376,7 +2454,7 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS, Decl *FunDecl = ParseCXXInlineMethodDef(AS, AccessAttrs, DeclaratorInfo, TemplateInfo, - VS, DefinitionKind, Init); + VS, Init); if (FunDecl) { for (unsigned i = 0, ni = CommonLateParsedAttrs.size(); i < ni; ++i) { @@ -2530,16 +2608,17 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS, // Parse the next declarator. DeclaratorInfo.clear(); VS.clear(); - BitfieldSize = true; - Init = true; + BitfieldSize = ExprResult(/*Invalid=*/false); + Init = ExprResult(/*Invalid=*/false); HasInitializer = false; DeclaratorInfo.setCommaLoc(CommaLoc); // GNU attributes are allowed before the second and subsequent declarator. MaybeParseGNUAttributes(DeclaratorInfo); - ParseCXXMemberDeclaratorBeforeInitializer(DeclaratorInfo, VS, BitfieldSize, - LateParsedAttrs); + if (ParseCXXMemberDeclaratorBeforeInitializer( + DeclaratorInfo, VS, BitfieldSize, LateParsedAttrs)) + break; } if (ExpectSemi && @@ -2617,6 +2696,55 @@ ExprResult Parser::ParseCXXMemberInitializer(Decl *D, bool IsFunction, return ParseInitializer(); } +void Parser::SkipCXXMemberSpecification(SourceLocation RecordLoc, + SourceLocation AttrFixitLoc, + unsigned TagType, Decl *TagDecl) { + // Skip the optional 'final' keyword. + if (getLangOpts().CPlusPlus && Tok.is(tok::identifier)) { + assert(isCXX11FinalKeyword() && "not a class definition"); + ConsumeToken(); + + // Diagnose any C++11 attributes after 'final' keyword. + // We deliberately discard these attributes. + ParsedAttributesWithRange Attrs(AttrFactory); + CheckMisplacedCXX11Attribute(Attrs, AttrFixitLoc); + + // This can only happen if we had malformed misplaced attributes; + // we only get called if there is a colon or left-brace after the + // attributes. + if (Tok.isNot(tok::colon) && Tok.isNot(tok::l_brace)) + return; + } + + // Skip the base clauses. This requires actually parsing them, because + // otherwise we can't be sure where they end (a left brace may appear + // within a template argument). + if (Tok.is(tok::colon)) { + // Enter the scope of the class so that we can correctly parse its bases. + ParseScope ClassScope(this, Scope::ClassScope|Scope::DeclScope); + ParsingClassDefinition ParsingDef(*this, TagDecl, /*NonNestedClass*/ true, + TagType == DeclSpec::TST_interface); + Actions.ActOnTagStartSkippedDefinition(getCurScope(), TagDecl); + + // Parse the bases but don't attach them to the class. + ParseBaseClause(nullptr); + + Actions.ActOnTagFinishSkippedDefinition(); + + if (!Tok.is(tok::l_brace)) { + Diag(PP.getLocForEndOfToken(PrevTokLocation), + diag::err_expected_lbrace_after_base_specifiers); + return; + } + } + + // Skip the body. 
+ assert(Tok.is(tok::l_brace)); + BalancedDelimiterTracker T(*this, tok::l_brace); + T.consumeOpen(); + T.skipToEnd(); +} + /// ParseCXXMemberSpecification - Parse the class definition. /// /// member-specification: @@ -2911,6 +3039,10 @@ void Parser::ParseCXXMemberSpecification(SourceLocation RecordLoc, ParseLexedMemberInitializers(getCurrentClass()); ParseLexedMethodDefs(getCurrentClass()); PrevTokLocation = SavedPrevTokLocation; + + // We've finished parsing everything, including default argument + // initializers. + Actions.ActOnFinishCXXMemberDefaultArgs(TagDecl); } if (TagDecl) @@ -2965,9 +3097,11 @@ void Parser::DiagnoseUnexpectedNamespace(NamedDecl *D) { /// mem-initializer ...[opt] /// mem-initializer ...[opt] , mem-initializer-list void Parser::ParseConstructorInitializer(Decl *ConstructorDecl) { - assert(Tok.is(tok::colon) && "Constructor initializer always starts with ':'"); + assert(Tok.is(tok::colon) && + "Constructor initializer always starts with ':'"); - // Poison the SEH identifiers so they are flagged as illegal in constructor initializers + // Poison the SEH identifiers so they are flagged as illegal in constructor + // initializers. PoisonSEHIdentifiersRAIIObject PoisonSEHIdentifiers(*this, true); SourceLocation ColonLoc = ConsumeToken(); @@ -3396,7 +3530,9 @@ IdentifierInfo *Parser::TryParseCXX11AttributeIdentifier(SourceLocation &Loc) { // Alternative tokens do not have identifier info, but their spelling // starts with an alphabetical character. SmallString<8> SpellingBuf; - StringRef Spelling = PP.getSpelling(Tok.getLocation(), SpellingBuf); + SourceLocation SpellingLoc = + PP.getSourceManager().getSpellingLoc(Tok.getLocation()); + StringRef Spelling = PP.getSpelling(SpellingLoc, SpellingBuf); if (isLetter(Spelling[0])) { Loc = ConsumeToken(); return &PP.getIdentifierTable().get(Spelling); @@ -3474,7 +3610,6 @@ bool Parser::ParseCXX11AttributeArgs(IdentifierInfo *AttrName, // The attribute was allowed to have arguments, but none were provided // even though the attribute parsed successfully. This is an error. Diag(LParenLoc, diag::err_attribute_requires_arguments) << AttrName; - return false; } else if (!Attr->getMaxArgs()) { // The attribute parsed successfully, but was not allowed to have any // arguments. It doesn't matter whether any were provided -- the @@ -3482,7 +3617,6 @@ bool Parser::ParseCXX11AttributeArgs(IdentifierInfo *AttrName, Diag(LParenLoc, diag::err_cxx11_attribute_forbids_arguments) << AttrName << FixItHint::CreateRemoval(SourceRange(LParenLoc, *EndLoc)); - return false; } } } diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseExpr.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseExpr.cpp index d0d97de..95a28a8 100644 --- a/contrib/llvm/tools/clang/lib/Parse/ParseExpr.cpp +++ b/contrib/llvm/tools/clang/lib/Parse/ParseExpr.cpp @@ -347,7 +347,11 @@ Parser::ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec) { RHS = ParseCastExpression(false); if (RHS.isInvalid()) { + // FIXME: Errors generated by the delayed typo correction should be + // printed before errors from parsing the RHS, not after. Actions.CorrectDelayedTyposInExpr(LHS); + if (TernaryMiddle.isUsable()) + TernaryMiddle = Actions.CorrectDelayedTyposInExpr(TernaryMiddle); LHS = ExprError(); } @@ -380,7 +384,11 @@ Parser::ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec) { RHSIsInitList = false; if (RHS.isInvalid()) { + // FIXME: Errors generated by the delayed typo correction should be + // printed before errors from ParseRHSOfBinaryExpression, not after. 
Actions.CorrectDelayedTyposInExpr(LHS); + if (TernaryMiddle.isUsable()) + TernaryMiddle = Actions.CorrectDelayedTyposInExpr(TernaryMiddle); LHS = ExprError(); } @@ -446,8 +454,8 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression, namespace { class CastExpressionIdValidator : public CorrectionCandidateCallback { public: - CastExpressionIdValidator(bool AllowTypes, bool AllowNonTypes) - : AllowNonTypes(AllowNonTypes) { + CastExpressionIdValidator(Token Next, bool AllowTypes, bool AllowNonTypes) + : NextToken(Next), AllowNonTypes(AllowNonTypes) { WantTypeSpecifiers = WantFunctionLikeCasts = AllowTypes; } @@ -458,11 +466,24 @@ class CastExpressionIdValidator : public CorrectionCandidateCallback { if (isa<TypeDecl>(ND)) return WantTypeSpecifiers; - return AllowNonTypes && - CorrectionCandidateCallback::ValidateCandidate(candidate); + + if (!AllowNonTypes || !CorrectionCandidateCallback::ValidateCandidate(candidate)) + return false; + + if (!(NextToken.is(tok::equal) || NextToken.is(tok::arrow) || + NextToken.is(tok::period))) + return true; + + for (auto *C : candidate) { + NamedDecl *ND = C->getUnderlyingDecl(); + if (isa<ValueDecl>(ND) && !isa<FunctionDecl>(ND)) + return true; + } + return false; } private: + Token NextToken; bool AllowNonTypes; }; } @@ -908,14 +929,20 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression, SourceLocation TemplateKWLoc; Token Replacement; auto Validator = llvm::make_unique<CastExpressionIdValidator>( - isTypeCast != NotTypeCast, isTypeCast != IsTypeCast); + Tok, isTypeCast != NotTypeCast, isTypeCast != IsTypeCast); Validator->IsAddressOfOperand = isAddressOfOperand; - Validator->WantRemainingKeywords = Tok.isNot(tok::r_paren); + if (Tok.is(tok::periodstar) || Tok.is(tok::arrowstar)) { + Validator->WantExpressionKeywords = false; + Validator->WantRemainingKeywords = false; + } else { + Validator->WantRemainingKeywords = Tok.isNot(tok::r_paren); + } Name.setIdentifier(&II, ILoc); Res = Actions.ActOnIdExpression( getCurScope(), ScopeSpec, TemplateKWLoc, Name, Tok.is(tok::l_paren), isAddressOfOperand, std::move(Validator), - /*IsInlineAsmIdentifier=*/false, &Replacement); + /*IsInlineAsmIdentifier=*/false, + Tok.is(tok::r_paren) ? nullptr : &Replacement); if (!Res.isInvalid() && !Res.get()) { UnconsumeToken(Replacement); return ParseCastExpression(isUnaryExpression, isAddressOfOperand, @@ -1441,10 +1468,14 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) { if (OpKind == tok::l_paren || !LHS.isInvalid()) { if (Tok.isNot(tok::r_paren)) { - if (ParseExpressionList(ArgExprs, CommaLocs, &Sema::CodeCompleteCall, - LHS.get())) { + if (ParseExpressionList(ArgExprs, CommaLocs, [&] { + Actions.CodeCompleteCall(getCurScope(), LHS.get(), ArgExprs); + })) { (void)Actions.CorrectDelayedTyposInExpr(LHS); LHS = ExprError(); + } else if (LHS.isInvalid()) { + for (auto &E : ArgExprs) + Actions.CorrectDelayedTyposInExpr(E); } } } @@ -1453,7 +1484,19 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) { if (LHS.isInvalid()) { SkipUntil(tok::r_paren, StopAtSemi); } else if (Tok.isNot(tok::r_paren)) { - PT.consumeClose(); + bool HadDelayedTypo = false; + if (Actions.CorrectDelayedTyposInExpr(LHS).get() != LHS.get()) + HadDelayedTypo = true; + for (auto &E : ArgExprs) + if (Actions.CorrectDelayedTyposInExpr(E).get() != E) + HadDelayedTypo = true; + // If there were delayed typos in the LHS or ArgExprs, call SkipUntil + // instead of PT.consumeClose() to avoid emitting extra diagnostics for + // the unmatched l_paren. 
+ if (HadDelayedTypo) + SkipUntil(tok::r_paren, StopAtSemi); + else + PT.consumeClose(); LHS = ExprError(); } else { assert((ArgExprs.size() == 0 || @@ -1510,14 +1553,14 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) { cutOffParsing(); return ExprError(); } - + if (MayBePseudoDestructor && !LHS.isInvalid()) { LHS = ParseCXXPseudoDestructor(LHS.get(), OpLoc, OpKind, SS, ObjectType); break; } - // Either the action has told is that this cannot be a + // Either the action has told us that this cannot be a // pseudo-destructor expression (based on the type of base // expression), or we didn't see a '~' in the right place. We // can still parse a destructor name here, but in that case it @@ -1526,7 +1569,8 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) { // FIXME: Add support for explicit call of template constructor. SourceLocation TemplateKWLoc; UnqualifiedId Name; - if (getLangOpts().ObjC2 && OpKind == tok::period && Tok.is(tok::kw_class)) { + if (getLangOpts().ObjC2 && OpKind == tok::period && + Tok.is(tok::kw_class)) { // Objective-C++: // After a '.' in a member access expression, treat the keyword // 'class' as if it were an identifier. @@ -1551,8 +1595,7 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) { LHS = Actions.ActOnMemberAccessExpr(getCurScope(), LHS.get(), OpLoc, OpKind, SS, TemplateKWLoc, Name, CurParsedObjCImpl ? CurParsedObjCImpl->Dcl - : nullptr, - Tok.is(tok::l_paren)); + : nullptr); break; } case tok::plusplus: // postfix-expression: postfix-expression '++' @@ -2088,6 +2131,17 @@ Parser::ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr, if (!getCurScope()->getFnParent() && !getCurScope()->getBlockParent()) { Result = ExprError(Diag(OpenLoc, diag::err_stmtexpr_file_scope)); } else { + // Find the nearest non-record decl context. Variables declared in a + // statement expression behave as if they were declared in the enclosing + // function, block, or other code construct. + DeclContext *CodeDC = Actions.CurContext; + while (CodeDC->isRecord() || isa<EnumDecl>(CodeDC)) { + CodeDC = CodeDC->getParent(); + assert(CodeDC && !CodeDC->isFileContext() && + "statement expr not in code context"); + } + Sema::ContextRAII SavedContext(Actions, CodeDC, /*NewThisContext=*/false); + Actions.ActOnStartStmtExpr(); StmtResult Stmt(ParseCompoundStatement(true)); @@ -2256,6 +2310,11 @@ Parser::ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr, InMessageExpressionRAIIObject InMessage(*this, false); Result = ParseExpression(MaybeTypeCast); + if (!getLangOpts().CPlusPlus && MaybeTypeCast && Result.isUsable()) { + // Correct typos in non-C++ code earlier so that implicit-cast-like + // expressions are parsed correctly. + Result = Actions.CorrectDelayedTyposInExpr(Result); + } ExprType = SimpleExpr; if (isFoldOperator(Tok.getKind()) && NextToken().is(tok::ellipsis)) @@ -2354,7 +2413,8 @@ ExprResult Parser::ParseGenericSelectionExpression() { // C11 6.5.1.1p3 "The controlling expression of a generic selection is // not evaluated." EnterExpressionEvaluationContext Unevaluated(Actions, Sema::Unevaluated); - ControllingExpr = ParseAssignmentExpression(); + ControllingExpr = + Actions.CorrectDelayedTyposInExpr(ParseAssignmentExpression()); if (ControllingExpr.isInvalid()) { SkipUntil(tok::r_paren, StopAtSemi); return ExprError(); @@ -2400,7 +2460,8 @@ ExprResult Parser::ParseGenericSelectionExpression() { // FIXME: These expressions should be parsed in a potentially potentially // evaluated context. 
- ExprResult ER(ParseAssignmentExpression()); + ExprResult ER( + Actions.CorrectDelayedTyposInExpr(ParseAssignmentExpression())); if (ER.isInvalid()) { SkipUntil(tok::r_paren, StopAtSemi); return ExprError(); @@ -2493,17 +2554,14 @@ ExprResult Parser::ParseFoldExpression(ExprResult LHS, /// [C++0x] assignment-expression /// [C++0x] braced-init-list /// \endverbatim -bool Parser::ParseExpressionList(SmallVectorImpl<Expr*> &Exprs, +bool Parser::ParseExpressionList(SmallVectorImpl<Expr *> &Exprs, SmallVectorImpl<SourceLocation> &CommaLocs, - void (Sema::*Completer)(Scope *S, - Expr *Data, - ArrayRef<Expr *> Args), - Expr *Data) { + std::function<void()> Completer) { bool SawError = false; while (1) { if (Tok.is(tok::code_completion)) { if (Completer) - (Actions.*Completer)(getCurScope(), Data, Exprs); + Completer(); else Actions.CodeCompleteOrdinaryName(getCurScope(), Sema::PCC_Expression); cutOffParsing(); diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseExprCXX.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseExprCXX.cpp index 67496ed..ed9f75d 100644 --- a/contrib/llvm/tools/clang/lib/Parse/ParseExprCXX.cpp +++ b/contrib/llvm/tools/clang/lib/Parse/ParseExprCXX.cpp @@ -118,6 +118,7 @@ void Parser::CheckForLParenAfterColonColon() { // Eat the '('. ConsumeParen(); Token RParen; + RParen.setLocation(SourceLocation()); // Do we have a ')' ? NextTok = StarTok.is(tok::star) ? GetLookAheadToken(2) : GetLookAheadToken(1); if (NextTok.is(tok::r_paren)) { @@ -194,6 +195,7 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS, if (Tok.is(tok::annot_cxxscope)) { assert(!LastII && "want last identifier but have already annotated scope"); + assert(!MayBePseudoDestructor && "unexpected annot_cxxscope"); Actions.RestoreNestedNameSpecifierAnnotation(Tok.getAnnotationValue(), Tok.getAnnotationRange(), SS); @@ -208,6 +210,13 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS, SS = TemplateId->SS; } + // Has to happen before any "return false"s in this function. + bool CheckForDestructor = false; + if (MayBePseudoDestructor && *MayBePseudoDestructor) { + CheckForDestructor = true; + *MayBePseudoDestructor = false; + } + if (LastII) *LastII = nullptr; @@ -244,12 +253,6 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS, return Actions.ActOnSuperScopeSpecifier(SuperLoc, ConsumeToken(), SS); } - bool CheckForDestructor = false; - if (MayBePseudoDestructor && *MayBePseudoDestructor) { - CheckForDestructor = true; - *MayBePseudoDestructor = false; - } - if (!HasScopeSpecifier && (Tok.is(tok::kw_decltype) || Tok.is(tok::annot_decltype))) { DeclSpec DS(AttrFactory); @@ -659,7 +662,8 @@ ExprResult Parser::ParseCXXIdExpression(bool isAddressOfOperand) { ParseOptionalCXXScopeSpecifier(SS, ParsedType(), /*EnteringContext=*/false); Token Replacement; - ExprResult Result = tryParseCXXIdExpression(SS, isAddressOfOperand, Replacement); + ExprResult Result = + tryParseCXXIdExpression(SS, isAddressOfOperand, Replacement); if (Result.isUnset()) { // If the ExprResult is valid but null, then typo correction suggested a // keyword replacement that needs to be reparsed. @@ -1090,6 +1094,10 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer( // compatible with GCC. MaybeParseGNUAttributes(Attr, &DeclEndLoc); + // MSVC-style attributes must be parsed before the mutable specifier to be + // compatible with MSVC. + MaybeParseMicrosoftDeclSpecs(Attr, &DeclEndLoc); + // Parse 'mutable'[opt]. 
SourceLocation MutableLoc; if (TryConsumeToken(tok::kw_mutable, MutableLoc)) @@ -1485,9 +1493,8 @@ Parser::ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc, ParseDecltypeSpecifier(DS); if (DS.getTypeSpecType() == TST_error) return ExprError(); - return Actions.ActOnPseudoDestructorExpr(getCurScope(), Base, OpLoc, - OpKind, TildeLoc, DS, - Tok.is(tok::l_paren)); + return Actions.ActOnPseudoDestructorExpr(getCurScope(), Base, OpLoc, OpKind, + TildeLoc, DS); } if (!Tok.is(tok::identifier)) { @@ -1510,11 +1517,9 @@ Parser::ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc, /*AssumeTemplateName=*/true)) return ExprError(); - return Actions.ActOnPseudoDestructorExpr(getCurScope(), Base, - OpLoc, OpKind, - SS, FirstTypeName, CCLoc, - TildeLoc, SecondTypeName, - Tok.is(tok::l_paren)); + return Actions.ActOnPseudoDestructorExpr(getCurScope(), Base, OpLoc, OpKind, + SS, FirstTypeName, CCLoc, TildeLoc, + SecondTypeName); } /// ParseCXXBoolLiteral - This handles the C++ Boolean literals. @@ -1602,7 +1607,11 @@ Parser::ParseCXXTypeConstructExpression(const DeclSpec &DS) { CommaLocsTy CommaLocs; if (Tok.isNot(tok::r_paren)) { - if (ParseExpressionList(Exprs, CommaLocs)) { + if (ParseExpressionList(Exprs, CommaLocs, [&] { + Actions.CodeCompleteConstructor(getCurScope(), + TypeRep.get()->getCanonicalTypeInternal(), + DS.getLocEnd(), Exprs); + })) { SkipUntil(tok::r_paren, StopAtSemi); return ExprError(); } @@ -2509,14 +2518,23 @@ bool Parser::ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext, } // If the user wrote ~T::T, correct it to T::~T. + DeclaratorScopeObj DeclScopeObj(*this, SS); if (!TemplateSpecified && NextToken().is(tok::coloncolon)) { + // Don't let ParseOptionalCXXScopeSpecifier() "correct" + // `int A; struct { ~A::A(); };` to `int A; struct { ~A:A(); };`, + // it will confuse this recovery logic. + ColonProtectionRAIIObject ColonRAII(*this, false); + if (SS.isSet()) { AnnotateScopeToken(SS, /*NewAnnotation*/true); SS.clear(); } if (ParseOptionalCXXScopeSpecifier(SS, ObjectType, EnteringContext)) return true; - if (Tok.isNot(tok::identifier) || NextToken().is(tok::coloncolon)) { + if (SS.isNotEmpty()) + ObjectType = ParsedType(); + if (Tok.isNot(tok::identifier) || NextToken().is(tok::coloncolon) || + !SS.isSet()) { Diag(TildeLoc, diag::err_destructor_tilde_scope); return true; } @@ -2525,6 +2543,10 @@ bool Parser::ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext, Diag(TildeLoc, diag::err_destructor_tilde_scope) << FixItHint::CreateRemoval(TildeLoc) << FixItHint::CreateInsertion(Tok.getLocation(), "~"); + + // Temporarily enter the scope for the rest of this function. + if (Actions.ShouldEnterDeclaratorScope(getCurScope(), SS)) + DeclScopeObj.EnterDeclaratorScope(); } // Parse the class-name (or template-name in a simple-template-id). 
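(A minimal illustration, not part of the diff itself: the "~T::T" misspelling that the recovery path in ParseUnqualifiedId above rewrites to "T::~T". The type and function names here are invented for the example.)

    struct A { ~A(); };

    void f(A *p) {
      p->~A::A();   // ill-formed: the '~' belongs after the nested-name-specifier;
                    // the parser recovers as if p->A::~A() had been written and
                    // emits err_destructor_tilde_scope with a fix-it that removes
                    // this '~' and inserts one before the second 'A'
    }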
@@ -2668,7 +2690,14 @@ Parser::ParseCXXNewExpression(bool UseGlobal, SourceLocation Start) { ConstructorLParen = T.getOpenLocation(); if (Tok.isNot(tok::r_paren)) { CommaLocsTy CommaLocs; - if (ParseExpressionList(ConstructorArgs, CommaLocs)) { + if (ParseExpressionList(ConstructorArgs, CommaLocs, [&] { + ParsedType TypeRep = Actions.ActOnTypeName(getCurScope(), + DeclaratorInfo).get(); + Actions.CodeCompleteConstructor(getCurScope(), + TypeRep.get()->getCanonicalTypeInternal(), + DeclaratorInfo.getLocEnd(), + ConstructorArgs); + })) { SkipUntil(tok::semi, StopAtSemi | StopBeforeMatch); return ExprError(); } diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseInit.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseInit.cpp index 7fe9862..42287d6 100644 --- a/contrib/llvm/tools/clang/lib/Parse/ParseInit.cpp +++ b/contrib/llvm/tools/clang/lib/Parse/ParseInit.cpp @@ -148,7 +148,7 @@ ExprResult Parser::ParseInitializerWithPotentialDesignator() { Diag(NameLoc, diag::ext_gnu_old_style_field_designator) << FixItHint::CreateReplacement(SourceRange(NameLoc, ColonLoc), - NewSyntax.str()); + NewSyntax); Designation D; D.AddDesignator(Designator::getField(FieldName, SourceLocation(), NameLoc)); diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseObjc.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseObjc.cpp index a597a16..691f53f 100644 --- a/contrib/llvm/tools/clang/lib/Parse/ParseObjc.cpp +++ b/contrib/llvm/tools/clang/lib/Parse/ParseObjc.cpp @@ -240,7 +240,7 @@ Decl *Parser::ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc, SmallVector<Decl *, 8> ProtocolRefs; SmallVector<SourceLocation, 8> ProtocolLocs; if (Tok.is(tok::less) && - ParseObjCProtocolReferences(ProtocolRefs, ProtocolLocs, true, + ParseObjCProtocolReferences(ProtocolRefs, ProtocolLocs, true, true, LAngleLoc, EndProtoLoc)) return nullptr; @@ -286,7 +286,7 @@ Decl *Parser::ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc, SmallVector<SourceLocation, 8> ProtocolLocs; SourceLocation LAngleLoc, EndProtoLoc; if (Tok.is(tok::less) && - ParseObjCProtocolReferences(ProtocolRefs, ProtocolLocs, true, + ParseObjCProtocolReferences(ProtocolRefs, ProtocolLocs, true, true, LAngleLoc, EndProtoLoc)) return nullptr; @@ -1151,7 +1151,7 @@ Decl *Parser::ParseObjCMethodDecl(SourceLocation mLoc, bool Parser:: ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &Protocols, SmallVectorImpl<SourceLocation> &ProtocolLocs, - bool WarnOnDeclarations, + bool WarnOnDeclarations, bool ForObjCContainer, SourceLocation &LAngleLoc, SourceLocation &EndLoc) { assert(Tok.is(tok::less) && "expected <"); @@ -1186,7 +1186,7 @@ ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &Protocols, return true; // Convert the list of protocols identifiers into a list of protocol decls. 
- Actions.FindProtocolDeclaration(WarnOnDeclarations, + Actions.FindProtocolDeclaration(WarnOnDeclarations, ForObjCContainer, &ProtocolIdents[0], ProtocolIdents.size(), Protocols); return false; @@ -1201,6 +1201,7 @@ bool Parser::ParseObjCProtocolQualifiers(DeclSpec &DS) { SmallVector<Decl *, 8> ProtocolDecl; SmallVector<SourceLocation, 8> ProtocolLocs; bool Result = ParseObjCProtocolReferences(ProtocolDecl, ProtocolLocs, false, + false, LAngleLoc, EndProtoLoc); DS.setProtocolQualifiers(ProtocolDecl.data(), ProtocolDecl.size(), ProtocolLocs.data(), LAngleLoc); @@ -1416,7 +1417,7 @@ Parser::ParseObjCAtProtocolDeclaration(SourceLocation AtLoc, SmallVector<Decl *, 8> ProtocolRefs; SmallVector<SourceLocation, 8> ProtocolLocs; if (Tok.is(tok::less) && - ParseObjCProtocolReferences(ProtocolRefs, ProtocolLocs, false, + ParseObjCProtocolReferences(ProtocolRefs, ProtocolLocs, false, true, LAngleLoc, EndProtoLoc)) return DeclGroupPtrTy(); diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseOpenMP.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseOpenMP.cpp index 764619a..187289ee 100644 --- a/contrib/llvm/tools/clang/lib/Parse/ParseOpenMP.cpp +++ b/contrib/llvm/tools/clang/lib/Parse/ParseOpenMP.cpp @@ -223,6 +223,7 @@ Parser::ParseOpenMPDeclarativeOrExecutableDirective(bool StandAloneAllowed) { ParseScope OMPDirectiveScope(this, ScopeFlags); Actions.StartOpenMPDSABlock(DKind, DirName, Actions.getCurScope(), Loc); + Actions.StartOpenMPClauses(); while (Tok.isNot(tok::annot_pragma_openmp_end)) { OpenMPClauseKind CKind = Tok.isAnnotation() @@ -242,6 +243,7 @@ Parser::ParseOpenMPDeclarativeOrExecutableDirective(bool StandAloneAllowed) { if (Tok.is(tok::comma)) ConsumeToken(); } + Actions.EndOpenMPClauses(); // End location of the directive. EndLoc = Tok.getLocation(); // Consume final annot_pragma_openmp_end. 
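(An illustrative example, not taken from the patch: the clause loop above, now bracketed by the new StartOpenMPClauses()/EndOpenMPClauses() calls, consumes one clause per iteration of a directive such as the following; the function and variable names are invented.)

    void sum_to(int n) {
      int s = 0;
    #pragma omp parallel for firstprivate(n) reduction(+:s)
      for (int i = 0; i < n; ++i)
        s += i;
    }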
@@ -257,13 +259,8 @@ Parser::ParseOpenMPDeclarativeOrExecutableDirective(bool StandAloneAllowed) { // Parse statement AssociatedStmt = ParseStatement(); Actions.ActOnFinishOfCompoundStmt(); - if (!AssociatedStmt.isUsable()) { - Actions.ActOnCapturedRegionError(); - CreateDirective = false; - } else { - AssociatedStmt = Actions.ActOnCapturedRegionEnd(AssociatedStmt.get()); - CreateDirective = AssociatedStmt.isUsable(); - } + AssociatedStmt = Actions.ActOnOpenMPRegionEnd(AssociatedStmt, Clauses); + CreateDirective = AssociatedStmt.isUsable(); } if (CreateDirective) Directive = Actions.ActOnOpenMPExecutableDirective( diff --git a/contrib/llvm/tools/clang/lib/Parse/ParsePragma.cpp b/contrib/llvm/tools/clang/lib/Parse/ParsePragma.cpp index 473be54..a8641ef 100644 --- a/contrib/llvm/tools/clang/lib/Parse/ParsePragma.cpp +++ b/contrib/llvm/tools/clang/lib/Parse/ParsePragma.cpp @@ -198,9 +198,12 @@ void Parser::initializePragmaHandlers() { OpenMPHandler.reset(new PragmaNoOpenMPHandler()); PP.AddPragmaHandler(OpenMPHandler.get()); - if (getLangOpts().MicrosoftExt) { + if (getLangOpts().MicrosoftExt || getTargetInfo().getTriple().isPS4()) { MSCommentHandler.reset(new PragmaCommentHandler(Actions)); PP.AddPragmaHandler(MSCommentHandler.get()); + } + + if (getLangOpts().MicrosoftExt) { MSDetectMismatchHandler.reset(new PragmaDetectMismatchHandler(Actions)); PP.AddPragmaHandler(MSDetectMismatchHandler.get()); MSPointersToMembers.reset(new PragmaMSPointersToMembers()); @@ -261,9 +264,12 @@ void Parser::resetPragmaHandlers() { PP.RemovePragmaHandler(OpenMPHandler.get()); OpenMPHandler.reset(); - if (getLangOpts().MicrosoftExt) { + if (getLangOpts().MicrosoftExt || getTargetInfo().getTriple().isPS4()) { PP.RemovePragmaHandler(MSCommentHandler.get()); MSCommentHandler.reset(); + } + + if (getLangOpts().MicrosoftExt) { PP.RemovePragmaHandler(MSDetectMismatchHandler.get()); MSDetectMismatchHandler.reset(); PP.RemovePragmaHandler(MSPointersToMembers.get()); @@ -900,6 +906,7 @@ void PragmaGCCVisibilityHandler::HandlePragma(Preprocessor &PP, << "visibility"; return; } + SourceLocation EndLoc = Tok.getLocation(); PP.LexUnexpandedToken(Tok); if (Tok.isNot(tok::eod)) { PP.Diag(Tok.getLocation(), diag::warn_pragma_extra_tokens_at_eol) @@ -911,6 +918,7 @@ void PragmaGCCVisibilityHandler::HandlePragma(Preprocessor &PP, Toks[0].startToken(); Toks[0].setKind(tok::annot_pragma_vis); Toks[0].setLocation(VisLoc); + Toks[0].setAnnotationEndLoc(EndLoc); Toks[0].setAnnotationValue( const_cast<void*>(static_cast<const void*>(VisType))); PP.EnterTokenStream(Toks, 1, /*DisableMacroExpansion=*/true, @@ -1030,6 +1038,7 @@ void PragmaPackHandler::HandlePragma(Preprocessor &PP, Toks[0].startToken(); Toks[0].setKind(tok::annot_pragma_pack); Toks[0].setLocation(PackLoc); + Toks[0].setAnnotationEndLoc(RParenLoc); Toks[0].setAnnotationValue(static_cast<void*>(Info)); PP.EnterTokenStream(Toks, 1, /*DisableMacroExpansion=*/true, /*OwnsTokens=*/false); @@ -1048,6 +1057,7 @@ void PragmaMSStructHandler::HandlePragma(Preprocessor &PP, PP.Diag(Tok.getLocation(), diag::warn_pragma_ms_struct); return; } + SourceLocation EndLoc = Tok.getLocation(); const IdentifierInfo *II = Tok.getIdentifierInfo(); if (II->isStr("on")) { Kind = Sema::PMSST_ON; @@ -1073,6 +1083,7 @@ void PragmaMSStructHandler::HandlePragma(Preprocessor &PP, Toks[0].startToken(); Toks[0].setKind(tok::annot_pragma_msstruct); Toks[0].setLocation(MSStructTok.getLocation()); + Toks[0].setAnnotationEndLoc(EndLoc); Toks[0].setAnnotationValue(reinterpret_cast<void*>( 
static_cast<uintptr_t>(Kind))); PP.EnterTokenStream(Toks, 1, /*DisableMacroExpansion=*/true, @@ -1128,6 +1139,7 @@ static void ParseAlignPragma(Preprocessor &PP, Token &FirstTok, return; } + SourceLocation EndLoc = Tok.getLocation(); PP.Lex(Tok); if (Tok.isNot(tok::eod)) { PP.Diag(Tok.getLocation(), diag::warn_pragma_extra_tokens_at_eol) @@ -1142,6 +1154,7 @@ static void ParseAlignPragma(Preprocessor &PP, Token &FirstTok, Toks[0].startToken(); Toks[0].setKind(tok::annot_pragma_align); Toks[0].setLocation(FirstTok.getLocation()); + Toks[0].setAnnotationEndLoc(EndLoc); Toks[0].setAnnotationValue(reinterpret_cast<void*>( static_cast<uintptr_t>(Kind))); PP.EnterTokenStream(Toks, 1, /*DisableMacroExpansion=*/true, @@ -1285,6 +1298,7 @@ void PragmaWeakHandler::HandlePragma(Preprocessor &PP, pragmaUnusedTok.startToken(); pragmaUnusedTok.setKind(tok::annot_pragma_weakalias); pragmaUnusedTok.setLocation(WeakLoc); + pragmaUnusedTok.setAnnotationEndLoc(AliasName.getLocation()); Toks[1] = WeakName; Toks[2] = AliasName; PP.EnterTokenStream(Toks, 3, @@ -1297,6 +1311,7 @@ void PragmaWeakHandler::HandlePragma(Preprocessor &PP, pragmaUnusedTok.startToken(); pragmaUnusedTok.setKind(tok::annot_pragma_weak); pragmaUnusedTok.setLocation(WeakLoc); + pragmaUnusedTok.setAnnotationEndLoc(WeakLoc); Toks[1] = WeakName; PP.EnterTokenStream(Toks, 2, /*DisableMacroExpansion=*/true, /*OwnsTokens=*/false); @@ -1342,6 +1357,7 @@ void PragmaRedefineExtnameHandler::HandlePragma(Preprocessor &PP, pragmaRedefTok.startToken(); pragmaRedefTok.setKind(tok::annot_pragma_redefine_extname); pragmaRedefTok.setLocation(RedefLoc); + pragmaRedefTok.setAnnotationEndLoc(AliasName.getLocation()); Toks[1] = RedefName; Toks[2] = AliasName; PP.EnterTokenStream(Toks, 3, @@ -1364,6 +1380,7 @@ PragmaFPContractHandler::HandlePragma(Preprocessor &PP, Toks[0].startToken(); Toks[0].setKind(tok::annot_pragma_fp_contract); Toks[0].setLocation(Tok.getLocation()); + Toks[0].setAnnotationEndLoc(Tok.getLocation()); Toks[0].setAnnotationValue(reinterpret_cast<void*>( static_cast<uintptr_t>(OOS))); PP.EnterTokenStream(Toks, 1, /*DisableMacroExpansion=*/true, @@ -1423,6 +1440,7 @@ PragmaOpenCLExtensionHandler::HandlePragma(Preprocessor &PP, Toks[0].setKind(tok::annot_pragma_opencl_extension); Toks[0].setLocation(NameLoc); Toks[0].setAnnotationValue(data.getOpaqueValue()); + Toks[0].setAnnotationEndLoc(StateLoc); PP.EnterTokenStream(Toks, 1, /*DisableMacroExpansion=*/true, /*OwnsTokens=*/false); @@ -1471,7 +1489,7 @@ PragmaOpenMPHandler::HandlePragma(Preprocessor &PP, Token *Toks = new Token[Pragma.size()]; std::copy(Pragma.begin(), Pragma.end(), Toks); PP.EnterTokenStream(Toks, Pragma.size(), - /*DisableMacroExpansion=*/true, /*OwnsTokens=*/true); + /*DisableMacroExpansion=*/false, /*OwnsTokens=*/true); } /// \brief Handle '#pragma pointers_to_members' @@ -1554,6 +1572,7 @@ void PragmaMSPointersToMembers::HandlePragma(Preprocessor &PP, return; } + SourceLocation EndLoc = Tok.getLocation(); PP.Lex(Tok); if (Tok.isNot(tok::eod)) { PP.Diag(Tok.getLocation(), diag::warn_pragma_extra_tokens_at_eol) @@ -1565,6 +1584,7 @@ void PragmaMSPointersToMembers::HandlePragma(Preprocessor &PP, AnnotTok.startToken(); AnnotTok.setKind(tok::annot_pragma_ms_pointers_to_members); AnnotTok.setLocation(PointersToMembersLoc); + AnnotTok.setAnnotationEndLoc(EndLoc); AnnotTok.setAnnotationValue( reinterpret_cast<void *>(static_cast<uintptr_t>(RepresentationMethod))); PP.EnterToken(AnnotTok); @@ -1644,6 +1664,7 @@ void PragmaMSVtorDisp::HandlePragma(Preprocessor &PP, 
PP.Diag(VtorDispLoc, diag::warn_pragma_expected_rparen) << "vtordisp"; return; } + SourceLocation EndLoc = Tok.getLocation(); PP.Lex(Tok); if (Tok.isNot(tok::eod)) { PP.Diag(Tok.getLocation(), diag::warn_pragma_extra_tokens_at_eol) @@ -1656,6 +1677,7 @@ void PragmaMSVtorDisp::HandlePragma(Preprocessor &PP, AnnotTok.startToken(); AnnotTok.setKind(tok::annot_pragma_ms_vtordisp); AnnotTok.setLocation(VtorDispLoc); + AnnotTok.setAnnotationEndLoc(EndLoc); AnnotTok.setAnnotationValue(reinterpret_cast<void *>( static_cast<uintptr_t>((Kind << 16) | (Value & 0xFFFF)))); PP.EnterToken(AnnotTok); @@ -1672,10 +1694,13 @@ void PragmaMSPragma::HandlePragma(Preprocessor &PP, AnnotTok.startToken(); AnnotTok.setKind(tok::annot_pragma_ms_pragma); AnnotTok.setLocation(Tok.getLocation()); + AnnotTok.setAnnotationEndLoc(Tok.getLocation()); SmallVector<Token, 8> TokenVector; // Suck up all of the tokens before the eod. - for (; Tok.isNot(tok::eod); PP.Lex(Tok)) + for (; Tok.isNot(tok::eod); PP.Lex(Tok)) { TokenVector.push_back(Tok); + AnnotTok.setAnnotationEndLoc(Tok.getLocation()); + } // Add a sentinal EoF token to the end of the list. TokenVector.push_back(EoF); // We must allocate this array with new because EnterTokenStream is going to @@ -1786,6 +1811,14 @@ void PragmaCommentHandler::HandlePragma(Preprocessor &PP, return; } + // On PS4, issue a warning about any pragma comments other than + // #pragma comment lib. + if (PP.getTargetInfo().getTriple().isPS4() && Kind != Sema::PCK_Lib) { + PP.Diag(Tok.getLocation(), diag::warn_pragma_comment_ignored) + << II->getName(); + return; + } + // Read the optional string if present. PP.Lex(Tok); std::string ArgumentString; @@ -1994,6 +2027,7 @@ void PragmaLoopHintHandler::HandlePragma(Preprocessor &PP, LoopHintTok.startToken(); LoopHintTok.setKind(tok::annot_pragma_loop_hint); LoopHintTok.setLocation(PragmaName.getLocation()); + LoopHintTok.setAnnotationEndLoc(PragmaName.getLocation()); LoopHintTok.setAnnotationValue(static_cast<void *>(Info)); TokenList.push_back(LoopHintTok); } @@ -2076,6 +2110,7 @@ void PragmaUnrollHintHandler::HandlePragma(Preprocessor &PP, TokenArray[0].startToken(); TokenArray[0].setKind(tok::annot_pragma_loop_hint); TokenArray[0].setLocation(PragmaName.getLocation()); + TokenArray[0].setAnnotationEndLoc(PragmaName.getLocation()); TokenArray[0].setAnnotationValue(static_cast<void *>(Info)); PP.EnterTokenStream(TokenArray, 1, /*DisableMacroExpansion=*/false, /*OwnsTokens=*/true); diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseStmt.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseStmt.cpp index 2a5f840..055bdea 100644 --- a/contrib/llvm/tools/clang/lib/Parse/ParseStmt.cpp +++ b/contrib/llvm/tools/clang/lib/Parse/ParseStmt.cpp @@ -408,12 +408,6 @@ StmtResult Parser::ParseExprStatement() { return Actions.ActOnExprStmt(Expr); } -StmtResult Parser::ParseSEHTryBlock() { - assert(Tok.is(tok::kw___try) && "Expected '__try'"); - SourceLocation Loc = ConsumeToken(); - return ParseSEHTryBlockCommon(Loc); -} - /// ParseSEHTryBlockCommon /// /// seh-try-block: @@ -423,8 +417,11 @@ StmtResult Parser::ParseSEHTryBlock() { /// seh-except-block /// seh-finally-block /// -StmtResult Parser::ParseSEHTryBlockCommon(SourceLocation TryLoc) { - if(Tok.isNot(tok::l_brace)) +StmtResult Parser::ParseSEHTryBlock() { + assert(Tok.is(tok::kw___try) && "Expected '__try'"); + SourceLocation TryLoc = ConsumeToken(); + + if (Tok.isNot(tok::l_brace)) return StmtError(Diag(Tok, diag::err_expected) << tok::l_brace); StmtResult 
TryBlock(ParseCompoundStatement(/*isStmtExpr=*/false, @@ -441,7 +438,7 @@ StmtResult Parser::ParseSEHTryBlockCommon(SourceLocation TryLoc) { SourceLocation Loc = ConsumeToken(); Handler = ParseSEHFinallyBlock(Loc); } else { - return StmtError(Diag(Tok,diag::err_seh_expected_handler)); + return StmtError(Diag(Tok, diag::err_seh_expected_handler)); } if(Handler.isInvalid()) @@ -466,14 +463,21 @@ StmtResult Parser::ParseSEHExceptBlock(SourceLocation ExceptLoc) { if (ExpectAndConsume(tok::l_paren)) return StmtError(); - ParseScope ExpectScope(this, Scope::DeclScope | Scope::ControlScope); + ParseScope ExpectScope(this, Scope::DeclScope | Scope::ControlScope | + Scope::SEHExceptScope); if (getLangOpts().Borland) { Ident__exception_info->setIsPoisoned(false); Ident___exception_info->setIsPoisoned(false); Ident_GetExceptionInfo->setIsPoisoned(false); } - ExprResult FilterExpr(ParseExpression()); + + ExprResult FilterExpr; + { + ParseScopeFlags FilterScope(this, getCurScope()->getFlags() | + Scope::SEHFilterScope); + FilterExpr = Actions.CorrectDelayedTyposInExpr(ParseExpression()); + } if (getLangOpts().Borland) { Ident__exception_info->setIsPoisoned(true); @@ -487,6 +491,9 @@ StmtResult Parser::ParseSEHExceptBlock(SourceLocation ExceptLoc) { if (ExpectAndConsume(tok::r_paren)) return StmtError(); + if (Tok.isNot(tok::l_brace)) + return StmtError(Diag(Tok, diag::err_expected) << tok::l_brace); + StmtResult Block(ParseCompoundStatement()); if(Block.isInvalid()) @@ -500,16 +507,24 @@ StmtResult Parser::ParseSEHExceptBlock(SourceLocation ExceptLoc) { /// seh-finally-block: /// '__finally' compound-statement /// -StmtResult Parser::ParseSEHFinallyBlock(SourceLocation FinallyBlock) { +StmtResult Parser::ParseSEHFinallyBlock(SourceLocation FinallyLoc) { PoisonIdentifierRAIIObject raii(Ident__abnormal_termination, false), raii2(Ident___abnormal_termination, false), raii3(Ident_AbnormalTermination, false); + if (Tok.isNot(tok::l_brace)) + return StmtError(Diag(Tok, diag::err_expected) << tok::l_brace); + + ParseScope FinallyScope(this, 0); + Actions.ActOnStartSEHFinallyBlock(); + StmtResult Block(ParseCompoundStatement()); - if(Block.isInvalid()) + if(Block.isInvalid()) { + Actions.ActOnAbortSEHFinallyBlock(); return Block; + } - return Actions.ActOnSEHFinallyBlock(FinallyBlock,Block.get()); + return Actions.ActOnFinishSEHFinallyBlock(FinallyLoc, Block.get()); } /// Handle __leave @@ -1253,7 +1268,7 @@ StmtResult Parser::ParseSwitchStatement(SourceLocation *TrailingElseLoc) { // We have incremented the mangling number for the SwitchScope and the // InnerScope, which is one too many. if (C99orCXX) - getCurScope()->decrementMSLocalManglingNumber(); + getCurScope()->decrementMSManglingNumber(); // Read the body statement. StmtResult Body(ParseStatement(TrailingElseLoc)); @@ -1674,6 +1689,12 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) { FirstPart.get(), Collection.get(), T.getCloseLocation()); + } else { + // In OpenMP loop region loop control variable must be captured and be + // private. Perform analysis of first part (if any). + if (getLangOpts().OpenMP && FirstPart.isUsable()) { + Actions.ActOnOpenMPLoopInitialization(ForLoc, FirstPart.get()); + } } // C99 6.8.5p5 - In C99, the body of the for statement is a scope, even if @@ -1695,7 +1716,7 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) { // It will only be incremented if the body contains other things that would // normally increment the mangling number (like a compound statement). 
if (C99orCXXorObjC) - getCurScope()->decrementMSLocalManglingNumber(); + getCurScope()->decrementMSManglingNumber(); // Read the body statement. StmtResult Body(ParseStatement(TrailingElseLoc)); diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseStmtAsm.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseStmtAsm.cpp index 7bf4da6..8ba9f15 100644 --- a/contrib/llvm/tools/clang/lib/Parse/ParseStmtAsm.cpp +++ b/contrib/llvm/tools/clang/lib/Parse/ParseStmtAsm.cpp @@ -530,7 +530,7 @@ StmtResult Parser::ParseMicrosoftAsmStatement(SourceLocation AsmLoc) { TheTarget->createMCAsmParser(*STI, *Parser, *MII, MCOptions)); std::unique_ptr<llvm::MCInstPrinter> IP( - TheTarget->createMCInstPrinter(1, *MAI, *MII, *MRI, *STI)); + TheTarget->createMCInstPrinter(llvm::Triple(TT), 1, *MAI, *MII, *MRI)); // Change to the Intel dialect. Parser->setAssemblerDialect(1); @@ -615,6 +615,7 @@ StmtResult Parser::ParseAsmStatement(bool &msAsm) { msAsm = true; return ParseMicrosoftAsmStatement(AsmLoc); } + DeclSpec DS(AttrFactory); SourceLocation Loc = Tok.getLocation(); ParseTypeQualifierListOpt(DS, AR_VendorAttributesParsed); @@ -639,6 +640,15 @@ StmtResult Parser::ParseAsmStatement(bool &msAsm) { T.consumeOpen(); ExprResult AsmString(ParseAsmStringLiteral()); + + // Check if GNU-style InlineAsm is disabled. + // Error on anything other than empty string. + if (!(getLangOpts().GNUAsm || AsmString.isInvalid())) { + const auto *SL = cast<StringLiteral>(AsmString.get()); + if (!SL->getString().trim().empty()) + Diag(Loc, diag::err_gnu_inline_asm_disabled); + } + if (AsmString.isInvalid()) { // Consume up to and including the closing paren. T.skipToEnd(); diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseTemplate.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseTemplate.cpp index 53de72c..f1467fe 100644 --- a/contrib/llvm/tools/clang/lib/Parse/ParseTemplate.cpp +++ b/contrib/llvm/tools/clang/lib/Parse/ParseTemplate.cpp @@ -14,6 +14,7 @@ #include "clang/Parse/Parser.h" #include "RAIIObjectsForParser.h" #include "clang/AST/ASTConsumer.h" +#include "clang/AST/ASTContext.h" #include "clang/AST/DeclTemplate.h" #include "clang/Parse/ParseDiagnostic.h" #include "clang/Sema/DeclSpec.h" @@ -1301,7 +1302,8 @@ void Parser::ParseLateTemplatedFuncDef(LateParsedTemplate &LPT) { TemplateParameterDepthRAII CurTemplateDepthTracker(TemplateParameterDepth); // To restore the context after late parsing. - Sema::ContextRAII GlobalSavedContext(Actions, Actions.CurContext); + Sema::ContextRAII GlobalSavedContext( + Actions, Actions.Context.getTranslationUnitDecl()); SmallVector<ParseScope*, 4> TemplateParamScopeStack; diff --git a/contrib/llvm/tools/clang/lib/Parse/Parser.cpp b/contrib/llvm/tools/clang/lib/Parse/Parser.cpp index 7ccd209..dea7a69 100644 --- a/contrib/llvm/tools/clang/lib/Parse/Parser.cpp +++ b/contrib/llvm/tools/clang/lib/Parse/Parser.cpp @@ -38,6 +38,26 @@ public: return false; } }; + +/// \brief RAIIObject to destroy the contents of a SmallVector of +/// TemplateIdAnnotation pointers and clear the vector. 
+class DestroyTemplateIdAnnotationsRAIIObj { + SmallVectorImpl<TemplateIdAnnotation *> &Container; + +public: + DestroyTemplateIdAnnotationsRAIIObj( + SmallVectorImpl<TemplateIdAnnotation *> &Container) + : Container(Container) {} + + ~DestroyTemplateIdAnnotationsRAIIObj() { + for (SmallVectorImpl<TemplateIdAnnotation *>::iterator I = + Container.begin(), + E = Container.end(); + I != E; ++I) + (*I)->Destroy(); + Container.clear(); + } +}; } // end anonymous namespace IdentifierInfo *Parser::getSEHExceptKeyword() { @@ -414,6 +434,15 @@ Parser::~Parser() { PP.clearCodeCompletionHandler(); + if (getLangOpts().DelayedTemplateParsing && + !PP.isIncrementalProcessingEnabled() && !TemplateIds.empty()) { + // If an ASTConsumer parsed delay-parsed templates in their + // HandleTranslationUnit() method, TemplateIds created there were not + // guarded by a DestroyTemplateIdAnnotationsRAIIObj object in + // ParseTopLevelDecl(). Destroy them here. + DestroyTemplateIdAnnotationsRAIIObj CleanupRAII(TemplateIds); + } + assert(TemplateIds.empty() && "Still alive TemplateIdAnnotations around?"); } @@ -490,26 +519,6 @@ void Parser::Initialize() { ConsumeToken(); } -namespace { - /// \brief RAIIObject to destroy the contents of a SmallVector of - /// TemplateIdAnnotation pointers and clear the vector. - class DestroyTemplateIdAnnotationsRAIIObj { - SmallVectorImpl<TemplateIdAnnotation *> &Container; - public: - DestroyTemplateIdAnnotationsRAIIObj(SmallVectorImpl<TemplateIdAnnotation *> - &Container) - : Container(Container) {} - - ~DestroyTemplateIdAnnotationsRAIIObj() { - for (SmallVectorImpl<TemplateIdAnnotation *>::iterator I = - Container.begin(), E = Container.end(); - I != E; ++I) - (*I)->Destroy(); - Container.clear(); - } - }; -} - void Parser::LateTemplateParserCleanupCallback(void *P) { // While this RAII helper doesn't bracket any actual work, the destructor will // clean up annotations that were created during ActOnEndOfTranslationUnit @@ -541,8 +550,14 @@ bool Parser::ParseTopLevelDecl(DeclGroupPtrTy &Result) { return false; case tok::annot_module_begin: + Actions.ActOnModuleBegin(Tok.getLocation(), reinterpret_cast<Module *>( + Tok.getAnnotationValue())); + ConsumeToken(); + return false; + case tok::annot_module_end: - // FIXME: Update visibility based on the submodule we're in. + Actions.ActOnModuleEnd(Tok.getLocation(), reinterpret_cast<Module *>( + Tok.getAnnotationValue())); ConsumeToken(); return false; @@ -669,8 +684,18 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs, SourceLocation StartLoc = Tok.getLocation(); SourceLocation EndLoc; + ExprResult Result(ParseSimpleAsm(&EndLoc)); + // Check if GNU-style InlineAsm is disabled. + // Empty asm string is allowed because it will not introduce + // any assembly code. + if (!(getLangOpts().GNUAsm || Result.isInvalid())) { + const auto *SL = cast<StringLiteral>(Result.get()); + if (!SL->getString().trim().empty()) + Diag(StartLoc, diag::err_gnu_inline_asm_disabled); + } + ExpectAndConsume(tok::semi, diag::err_expected_after, "top-level asm block"); @@ -1048,7 +1073,6 @@ Decl *Parser::ParseFunctionDefinition(ParsingDeclarator &D, if (TryConsumeToken(tok::equal)) { assert(getLangOpts().CPlusPlus && "Only C++ function definitions have '='"); - Actions.ActOnFinishFunctionBody(Res, nullptr, false); bool Delete = false; SourceLocation KWLoc; @@ -1076,6 +1100,8 @@ Decl *Parser::ParseFunctionDefinition(ParsingDeclarator &D, SkipUntil(tok::semi); } + Stmt *GeneratedBody = Res ? 
Res->getBody() : nullptr; + Actions.ActOnFinishFunctionBody(Res, GeneratedBody, false); return Res; } diff --git a/contrib/llvm/tools/clang/lib/Parse/RAIIObjectsForParser.h b/contrib/llvm/tools/clang/lib/Parse/RAIIObjectsForParser.h index a0c9c1f..36d87eb 100644 --- a/contrib/llvm/tools/clang/lib/Parse/RAIIObjectsForParser.h +++ b/contrib/llvm/tools/clang/lib/Parse/RAIIObjectsForParser.h @@ -58,6 +58,12 @@ namespace clang { Active = false; } } + SuppressAccessChecks(SuppressAccessChecks &&Other) + : S(Other.S), DiagnosticPool(std::move(Other.DiagnosticPool)), + State(Other.State), Active(Other.Active) { + Other.Active = false; + } + void operator=(SuppressAccessChecks &&Other) = delete; void done() { assert(Active && "trying to end an inactive suppression"); @@ -87,8 +93,8 @@ namespace clang { Sema::ParsingDeclState State; bool Popped; - ParsingDeclRAIIObject(const ParsingDeclRAIIObject &) LLVM_DELETED_FUNCTION; - void operator=(const ParsingDeclRAIIObject &) LLVM_DELETED_FUNCTION; + ParsingDeclRAIIObject(const ParsingDeclRAIIObject &) = delete; + void operator=(const ParsingDeclRAIIObject &) = delete; public: enum NoParent_t { NoParent }; @@ -244,8 +250,8 @@ namespace clang { /// the way they used to be. This is used to handle __extension__ in the /// parser. class ExtensionRAIIObject { - ExtensionRAIIObject(const ExtensionRAIIObject &) LLVM_DELETED_FUNCTION; - void operator=(const ExtensionRAIIObject &) LLVM_DELETED_FUNCTION; + ExtensionRAIIObject(const ExtensionRAIIObject &) = delete; + void operator=(const ExtensionRAIIObject &) = delete; DiagnosticsEngine &Diags; public: @@ -423,7 +429,13 @@ namespace clang { if (P.Tok.is(Close)) { LClose = (P.*Consumer)(); return false; - } + } else if (P.Tok.is(tok::semi) && P.NextToken().is(Close)) { + SourceLocation SemiLoc = P.ConsumeToken(); + P.Diag(SemiLoc, diag::err_unexpected_semi) + << Close << FixItHint::CreateRemoval(SourceRange(SemiLoc, SemiLoc)); + LClose = (P.*Consumer)(); + return false; + } return diagnoseMissingClose(); } diff --git a/contrib/llvm/tools/clang/lib/Rewrite/RewriteRope.cpp b/contrib/llvm/tools/clang/lib/Rewrite/RewriteRope.cpp index 1c82ee4..451ad07 100644 --- a/contrib/llvm/tools/clang/lib/Rewrite/RewriteRope.cpp +++ b/contrib/llvm/tools/clang/lib/Rewrite/RewriteRope.cpp @@ -89,9 +89,9 @@ namespace { bool IsLeaf; RopePieceBTreeNode(bool isLeaf) : Size(0), IsLeaf(isLeaf) {} - ~RopePieceBTreeNode() {} - public: + ~RopePieceBTreeNode() = default; + public: bool isLeaf() const { return IsLeaf; } unsigned size() const { return Size; } diff --git a/contrib/llvm/tools/clang/lib/Rewrite/Rewriter.cpp b/contrib/llvm/tools/clang/lib/Rewrite/Rewriter.cpp index 60cdcf7..be09a36 100644 --- a/contrib/llvm/tools/clang/lib/Rewrite/Rewriter.cpp +++ b/contrib/llvm/tools/clang/lib/Rewrite/Rewriter.cpp @@ -54,7 +54,7 @@ void RewriteBuffer::RemoveText(unsigned OrigOffset, unsigned Size, if (Size == 0) return; unsigned RealOffset = getMappedOffset(OrigOffset, true); - assert(RealOffset+Size < Buffer.size() && "Invalid location"); + assert(RealOffset+Size <= Buffer.size() && "Invalid location"); // Remove the dead characters. 
Buffer.erase(RealOffset, Size); diff --git a/contrib/llvm/tools/clang/lib/Sema/AnalysisBasedWarnings.cpp b/contrib/llvm/tools/clang/lib/Sema/AnalysisBasedWarnings.cpp index f666a9b..d697ecb 100644 --- a/contrib/llvm/tools/clang/lib/Sema/AnalysisBasedWarnings.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/AnalysisBasedWarnings.cpp @@ -117,6 +117,7 @@ static void CheckUnreachable(Sema &S, AnalysisDeclContext &AC) { reachable_code::FindUnreachableCode(AC, S.getPreprocessor(), UC); } +namespace { /// \brief Warn on logical operator errors in CFGBuilder class LogicalErrorHandler : public CFGCallback { Sema &S; @@ -138,7 +139,7 @@ public: return false; } - void compareAlwaysTrue(const BinaryOperator *B, bool isAlwaysTrue) { + void compareAlwaysTrue(const BinaryOperator *B, bool isAlwaysTrue) override { if (HasMacroID(B)) return; @@ -147,7 +148,8 @@ public: << DiagRange << isAlwaysTrue; } - void compareBitwiseEquality(const BinaryOperator *B, bool isAlwaysTrue) { + void compareBitwiseEquality(const BinaryOperator *B, + bool isAlwaysTrue) override { if (HasMacroID(B)) return; @@ -156,7 +158,7 @@ public: << DiagRange << isAlwaysTrue; } }; - +} // namespace //===----------------------------------------------------------------------===// // Check for infinite self-recursion in functions @@ -1332,9 +1334,7 @@ class UninitValsDiagReporter : public UninitVariablesHandler { public: UninitValsDiagReporter(Sema &S) : S(S), uses(nullptr) {} - ~UninitValsDiagReporter() { - flushDiagnostics(); - } + ~UninitValsDiagReporter() override { flushDiagnostics(); } MappedType &getUses(const VarDecl *vd) { if (!uses) @@ -1438,7 +1438,7 @@ struct SortDiagBySourceLocation { //===----------------------------------------------------------------------===// namespace clang { namespace threadSafety { - +namespace { class ThreadSafetyReporter : public clang::threadSafety::ThreadSafetyHandler { Sema &S; DiagList Warnings; @@ -1662,9 +1662,8 @@ class ThreadSafetyReporter : public clang::threadSafety::ThreadSafetyHandler { } } - - virtual void handleNegativeNotHeld(StringRef Kind, Name LockName, Name Neg, - SourceLocation Loc) override { + void handleNegativeNotHeld(StringRef Kind, Name LockName, Name Neg, + SourceLocation Loc) override { PartialDiagnosticAt Warning(Loc, S.PDiag(diag::warn_acquire_requires_negative_cap) << Kind << LockName << Neg); @@ -1679,6 +1678,19 @@ class ThreadSafetyReporter : public clang::threadSafety::ThreadSafetyHandler { Warnings.push_back(DelayedDiag(Warning, getNotes())); } + void handleLockAcquiredBefore(StringRef Kind, Name L1Name, Name L2Name, + SourceLocation Loc) override { + PartialDiagnosticAt Warning(Loc, + S.PDiag(diag::warn_acquired_before) << Kind << L1Name << L2Name); + Warnings.push_back(DelayedDiag(Warning, getNotes())); + } + + void handleBeforeAfterCycle(Name L1Name, SourceLocation Loc) override { + PartialDiagnosticAt Warning(Loc, + S.PDiag(diag::warn_acquired_before_after_cycle) << L1Name); + Warnings.push_back(DelayedDiag(Warning, getNotes())); + } + void enterFunction(const FunctionDecl* FD) override { CurrentFunction = FD; } @@ -1687,9 +1699,9 @@ class ThreadSafetyReporter : public clang::threadSafety::ThreadSafetyHandler { CurrentFunction = 0; } }; - -} -} +} // namespace +} // namespace threadSafety +} // namespace clang //===----------------------------------------------------------------------===// // -Wconsumed @@ -1704,7 +1716,7 @@ class ConsumedWarningsHandler : public ConsumedWarningsHandlerBase { DiagList Warnings; public: - + ConsumedWarningsHandler(Sema &S) : S(S) {} 
void emitDiagnostics() override { @@ -1981,7 +1993,8 @@ AnalysisBasedWarnings::IssueWarnings(sema::AnalysisBasedWarnings::Policy P, if (!Diags.isIgnored(diag::warn_thread_safety_verbose, D->getLocStart())) Reporter.setVerbose(true); - threadSafety::runThreadSafetyAnalysis(AC, Reporter); + threadSafety::runThreadSafetyAnalysis(AC, Reporter, + &S.ThreadSafetyDeclCache); Reporter.emitDiagnostics(); } diff --git a/contrib/llvm/tools/clang/lib/Sema/CodeCompleteConsumer.cpp b/contrib/llvm/tools/clang/lib/Sema/CodeCompleteConsumer.cpp index b2dc2d7..69ae4f0 100644 --- a/contrib/llvm/tools/clang/lib/Sema/CodeCompleteConsumer.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/CodeCompleteConsumer.cpp @@ -251,19 +251,16 @@ const char *CodeCompletionString::getTypedText() const { return nullptr; } -const char *CodeCompletionAllocator::CopyString(StringRef String) { - char *Mem = (char *)Allocate(String.size() + 1, 1); - std::copy(String.begin(), String.end(), Mem); - Mem[String.size()] = 0; - return Mem; -} - -const char *CodeCompletionAllocator::CopyString(Twine String) { +const char *CodeCompletionAllocator::CopyString(const Twine &String) { + SmallString<128> Data; + StringRef Ref = String.toStringRef(Data); // FIXME: It would be more efficient to teach Twine to tell us its size and // then add a routine there to fill in an allocated char* with the contents // of the string. - SmallString<128> Data; - return CopyString(String.toStringRef(Data)); + char *Mem = (char *)Allocate(Ref.size() + 1, 1); + std::copy(Ref.begin(), Ref.end(), Mem); + Mem[Ref.size()] = 0; + return Mem; } StringRef CodeCompletionTUInfo::getParentName(const DeclContext *DC) { @@ -483,6 +480,31 @@ PrintingCodeCompleteConsumer::ProcessCodeCompleteResults(Sema &SemaRef, } } +// This function is used solely to preserve the former presentation of overloads +// by "clang -cc1 -code-completion-at", since CodeCompletionString::getAsString +// needs to be improved for printing the newer and more detailed overload +// chunks. +static std::string getOverloadAsString(const CodeCompletionString &CCS) { + std::string Result; + llvm::raw_string_ostream OS(Result); + + for (auto &C : CCS) { + switch (C.Kind) { + case CodeCompletionString::CK_Informative: + case CodeCompletionString::CK_ResultType: + OS << "[#" << C.Text << "#]"; + break; + + case CodeCompletionString::CK_CurrentParameter: + OS << "<#" << C.Text << "#>"; + break; + + default: OS << C.Text; break; + } + } + return OS.str(); +} + void PrintingCodeCompleteConsumer::ProcessOverloadCandidates(Sema &SemaRef, unsigned CurrentArg, @@ -491,8 +513,9 @@ PrintingCodeCompleteConsumer::ProcessOverloadCandidates(Sema &SemaRef, for (unsigned I = 0; I != NumCandidates; ++I) { if (CodeCompletionString *CCS = Candidates[I].CreateSignatureString(CurrentArg, SemaRef, - getAllocator(), CCTUInfo)) { - OS << "OVERLOAD: " << CCS->getAsString() << "\n"; + getAllocator(), CCTUInfo, + includeBriefComments())) { + OS << "OVERLOAD: " << getOverloadAsString(*CCS) << "\n"; } } } diff --git a/contrib/llvm/tools/clang/lib/Sema/DeclSpec.cpp b/contrib/llvm/tools/clang/lib/Sema/DeclSpec.cpp index 349bb32..1e7fc75 100644 --- a/contrib/llvm/tools/clang/lib/Sema/DeclSpec.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/DeclSpec.cpp @@ -983,11 +983,18 @@ void DeclSpec::Finish(DiagnosticsEngine &D, Preprocessor &PP, const PrintingPoli getSpecifierName((TST)TypeSpecType, Policy)); } - // Only 'short' is valid with vector bool. 
(PIM 2.1) - if ((TypeSpecWidth != TSW_unspecified) && (TypeSpecWidth != TSW_short)) + // Only 'short' and 'long long' are valid with vector bool. (PIM 2.1) + if ((TypeSpecWidth != TSW_unspecified) && (TypeSpecWidth != TSW_short) && + (TypeSpecWidth != TSW_longlong)) Diag(D, TSWLoc, diag::err_invalid_vector_bool_decl_spec) << getSpecifierName((TSW)TypeSpecWidth); + // vector bool long long requires VSX support. + if ((TypeSpecWidth == TSW_longlong) && + (!PP.getTargetInfo().hasFeature("vsx")) && + (!PP.getTargetInfo().hasFeature("power8-vector"))) + Diag(D, TSTLoc, diag::err_invalid_vector_long_long_decl_spec); + // Elements of vector bool are interpreted as unsigned. (PIM 2.1) if ((TypeSpecType == TST_char) || (TypeSpecType == TST_int) || (TypeSpecWidth != TSW_unspecified)) @@ -1214,7 +1221,10 @@ void UnqualifiedId::setOperatorFunctionId(SourceLocation OperatorLoc, bool VirtSpecifiers::SetSpecifier(Specifier VS, SourceLocation Loc, const char *&PrevSpec) { + if (!FirstLocation.isValid()) + FirstLocation = Loc; LastLocation = Loc; + LastSpecifier = VS; if (Specifiers & VS) { PrevSpec = getSpecifierName(VS); diff --git a/contrib/llvm/tools/clang/lib/Sema/DelayedDiagnostic.cpp b/contrib/llvm/tools/clang/lib/Sema/DelayedDiagnostic.cpp index 664a6b1..ceea04f 100644 --- a/contrib/llvm/tools/clang/lib/Sema/DelayedDiagnostic.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/DelayedDiagnostic.cpp @@ -35,6 +35,8 @@ DelayedDiagnostic::makeAvailability(Sema::AvailabilityDiagnostic AD, case Sema::AD_Unavailable: DD.Kind = Unavailable; break; + case Sema::AD_Partial: + llvm_unreachable("AD_Partial diags should not be delayed"); } DD.Triggered = false; DD.Loc = Loc; diff --git a/contrib/llvm/tools/clang/lib/Sema/IdentifierResolver.cpp b/contrib/llvm/tools/clang/lib/Sema/IdentifierResolver.cpp index 6586fb3..53263ba 100644 --- a/contrib/llvm/tools/clang/lib/Sema/IdentifierResolver.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/IdentifierResolver.cpp @@ -98,7 +98,7 @@ bool IdentifierResolver::isDeclInScope(Decl *D, DeclContext *Ctx, Scope *S, bool AllowInlineNamespace) const { Ctx = Ctx->getRedeclContext(); - if (Ctx->isFunctionOrMethod() || S->isFunctionPrototypeScope()) { + if (Ctx->isFunctionOrMethod() || (S && S->isFunctionPrototypeScope())) { // Ignore the scopes associated within transparent declaration contexts. while (S->getEntity() && S->getEntity()->isTransparentContext()) S = S->getParent(); @@ -266,6 +266,11 @@ static DeclMatchKind compareDeclarations(NamedDecl *Existing, NamedDecl *New) { // If the declarations are redeclarations of each other, keep the newest one. if (Existing->getCanonicalDecl() == New->getCanonicalDecl()) { + // If we're adding an imported declaration, don't replace another imported + // declaration. + if (Existing->isFromASTFile() && New->isFromASTFile()) + return DMK_Different; + // If either of these is the most recent declaration, use it. 
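
The AD_Partial arm added to makeAvailability above encodes an invariant rather than a reachable path: partially-available declarations are diagnosed immediately, so they never enter the delayed-diagnostic pool. The llvm_unreachable idiom keeps the switch exhaustive, so -Wswitch still flags any future enumerator; a generic sketch (the enum and names are illustrative):

    #include "llvm/Support/ErrorHandling.h"

    enum class AvailKind { Deprecated, Unavailable, Partial };

    unsigned pickDiagKind(AvailKind K) {
      switch (K) {
      case AvailKind::Deprecated:  return 0;
      case AvailKind::Unavailable: return 1;
      case AvailKind::Partial:
        // Listing the case documents why it cannot occur, instead of a
        // default: that would silence -Wswitch for new enumerators.
        llvm_unreachable("partial diags are emitted eagerly, never delayed");
      }
      llvm_unreachable("invalid AvailKind");
    }
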
Decl *MostRecent = Existing->getMostRecentDecl(); if (Existing == MostRecent) diff --git a/contrib/llvm/tools/clang/lib/Sema/JumpDiagnostics.cpp b/contrib/llvm/tools/clang/lib/Sema/JumpDiagnostics.cpp index fd75c02..aac28be 100644 --- a/contrib/llvm/tools/clang/lib/Sema/JumpDiagnostics.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/JumpDiagnostics.cpp @@ -338,6 +338,36 @@ void JumpScopeChecker::BuildScopeInformation(Stmt *S, unsigned &origParentScope) return; } + case Stmt::SEHTryStmtClass: { + SEHTryStmt *TS = cast<SEHTryStmt>(S); + unsigned newParentScope; + Scopes.push_back(GotoScope(ParentScope, + diag::note_protected_by_seh_try, + diag::note_exits_seh_try, + TS->getSourceRange().getBegin())); + if (Stmt *TryBlock = TS->getTryBlock()) + BuildScopeInformation(TryBlock, (newParentScope = Scopes.size()-1)); + + // Jumps from __except or __finally into the __try are not allowed either. + if (SEHExceptStmt *Except = TS->getExceptHandler()) { + Scopes.push_back(GotoScope(ParentScope, + diag::note_protected_by_seh_except, + diag::note_exits_seh_except, + Except->getSourceRange().getBegin())); + BuildScopeInformation(Except->getBlock(), + (newParentScope = Scopes.size()-1)); + } else if (SEHFinallyStmt *Finally = TS->getFinallyHandler()) { + Scopes.push_back(GotoScope(ParentScope, + diag::note_protected_by_seh_finally, + diag::note_exits_seh_finally, + Finally->getSourceRange().getBegin())); + BuildScopeInformation(Finally->getBlock(), + (newParentScope = Scopes.size()-1)); + } + + return; + } + default: break; } @@ -417,7 +447,8 @@ void JumpScopeChecker::BuildScopeInformation(Stmt *S, unsigned &origParentScope) unsigned newParentScope; // Disallow jumps into the protected statement of an @synchronized, but // allow jumps into the object expression it protects. - if (ObjCAtSynchronizedStmt *AS = dyn_cast<ObjCAtSynchronizedStmt>(SubStmt)){ + if (ObjCAtSynchronizedStmt *AS = + dyn_cast<ObjCAtSynchronizedStmt>(SubStmt)) { // Recursively walk the AST for the @synchronized object expr, it is // evaluated in the normal scope. BuildScopeInformation(AS->getSynchExpr(), ParentScope); @@ -434,14 +465,16 @@ void JumpScopeChecker::BuildScopeInformation(Stmt *S, unsigned &origParentScope) } // Disallow jumps into the protected statement of an @autoreleasepool. - if (ObjCAutoreleasePoolStmt *AS = dyn_cast<ObjCAutoreleasePoolStmt>(SubStmt)){ - // Recursively walk the AST for the @autoreleasepool part, protected by a new - // scope. + if (ObjCAutoreleasePoolStmt *AS = + dyn_cast<ObjCAutoreleasePoolStmt>(SubStmt)) { + // Recursively walk the AST for the @autoreleasepool part, protected by a + // new scope. Scopes.push_back(GotoScope(ParentScope, diag::note_protected_by_objc_autoreleasepool, diag::note_exits_objc_autoreleasepool, AS->getAtLoc())); - BuildScopeInformation(AS->getSubStmt(), (newParentScope = Scopes.size()-1)); + BuildScopeInformation(AS->getSubStmt(), + (newParentScope = Scopes.size() - 1)); continue; } @@ -756,6 +789,18 @@ void JumpScopeChecker::CheckJump(Stmt *From, Stmt *To, SourceLocation DiagLoc, // Common case: exactly the same scope, which is fine. if (FromScope == ToScope) return; + // Warn on gotos out of __finally blocks. + if (isa<GotoStmt>(From) || isa<IndirectGotoStmt>(From)) { + // If FromScope > ToScope, FromScope is more nested and the jump goes to a + // less nested scope. Check if it crosses a __finally along the way.
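
In source terms, the new GotoScopes make jumps into a __try, __except, or __finally block hard errors, while the CheckJump addition below merely warns when a goto leaves a __finally, since abandoning a termination handler has undefined behavior at runtime. A sketch of both diagnostics, assuming MSVC-compatible SEH under -fms-extensions (the labels are illustrative):

    void f(volatile int *p) {
      goto into_try;        // error: jump into protected scope
                            // (note_protected_by_seh_try)
      __try {
    into_try:
        *p = 1;
      } __finally {
        goto done;          // warn_jump_out_of_seh_finally
      }
    done:
      return;
    }
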
+ for (unsigned I = FromScope; I > ToScope; I = Scopes[I].ParentScope) { + if (Scopes[I].InDiag == diag::note_protected_by_seh_finally) { + S.Diag(From->getLocStart(), diag::warn_jump_out_of_seh_finally); + break; + } + } + } + unsigned CommonScope = GetDeepestCommonScope(FromScope, ToScope); // It's okay to jump out from a nested scope. diff --git a/contrib/llvm/tools/clang/lib/Sema/MultiplexExternalSemaSource.cpp b/contrib/llvm/tools/clang/lib/Sema/MultiplexExternalSemaSource.cpp index 449ddf4..9ecb5a7 100644 --- a/contrib/llvm/tools/clang/lib/Sema/MultiplexExternalSemaSource.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/MultiplexExternalSemaSource.cpp @@ -86,6 +86,14 @@ CXXBaseSpecifier *MultiplexExternalSemaSource::GetExternalCXXBaseSpecifiers( return nullptr; } +CXXCtorInitializer ** +MultiplexExternalSemaSource::GetExternalCXXCtorInitializers(uint64_t Offset) { + for (auto *S : Sources) + if (auto *R = S->GetExternalCXXCtorInitializers(Offset)) + return R; + return nullptr; +} + bool MultiplexExternalSemaSource:: FindExternalVisibleDeclsByName(const DeclContext *DC, DeclarationName Name) { bool AnyDeclsFound = false; @@ -204,7 +212,15 @@ void MultiplexExternalSemaSource::ReadUndefinedButUsed( for(size_t i = 0; i < Sources.size(); ++i) Sources[i]->ReadUndefinedButUsed(Undefined); } - + +void MultiplexExternalSemaSource::ReadMismatchingDeleteExpressions( + llvm::MapVector<FieldDecl *, + llvm::SmallVector<std::pair<SourceLocation, bool>, 4>> & + Exprs) { + for (auto &Source : Sources) + Source->ReadMismatchingDeleteExpressions(Exprs); +} + bool MultiplexExternalSemaSource::LookupUnqualified(LookupResult &R, Scope *S){ for(size_t i = 0; i < Sources.size(); ++i) Sources[i]->LookupUnqualified(R, S); @@ -236,24 +252,12 @@ void MultiplexExternalSemaSource::ReadExtVectorDecls( Sources[i]->ReadExtVectorDecls(Decls); } -void MultiplexExternalSemaSource::ReadDynamicClasses( - SmallVectorImpl<CXXRecordDecl*> &Decls) { - for(size_t i = 0; i < Sources.size(); ++i) - Sources[i]->ReadDynamicClasses(Decls); -} - void MultiplexExternalSemaSource::ReadUnusedLocalTypedefNameCandidates( llvm::SmallSetVector<const TypedefNameDecl *, 4> &Decls) { for(size_t i = 0; i < Sources.size(); ++i) Sources[i]->ReadUnusedLocalTypedefNameCandidates(Decls); } -void MultiplexExternalSemaSource::ReadLocallyScopedExternCDecls( - SmallVectorImpl<NamedDecl*> &Decls) { - for(size_t i = 0; i < Sources.size(); ++i) - Sources[i]->ReadLocallyScopedExternCDecls(Decls); -} - void MultiplexExternalSemaSource::ReadReferencedSelectors( SmallVectorImpl<std::pair<Selector, SourceLocation> > &Sels) { for(size_t i = 0; i < Sources.size(); ++i) @@ -280,7 +284,7 @@ void MultiplexExternalSemaSource::ReadPendingInstantiations( } void MultiplexExternalSemaSource::ReadLateParsedTemplates( - llvm::DenseMap<const FunctionDecl *, LateParsedTemplate *> &LPTMap) { + llvm::MapVector<const FunctionDecl *, LateParsedTemplate *> &LPTMap) { for (size_t i = 0; i < Sources.size(); ++i) Sources[i]->ReadLateParsedTemplates(LPTMap); } diff --git a/contrib/llvm/tools/clang/lib/Sema/Scope.cpp b/contrib/llvm/tools/clang/lib/Sema/Scope.cpp index 6c79778..7c70048 100644 --- a/contrib/llvm/tools/clang/lib/Sema/Scope.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/Scope.cpp @@ -38,7 +38,8 @@ void Scope::Init(Scope *parent, unsigned flags) { FnParent = parent->FnParent; BlockParent = parent->BlockParent; TemplateParamParent = parent->TemplateParamParent; - MSLocalManglingParent = parent->MSLocalManglingParent; + MSLastManglingParent = parent->MSLastManglingParent; + 
MSCurManglingNumber = getMSLastManglingNumber(); if ((Flags & (FnScope | ClassScope | BlockScope | TemplateParamScope | FunctionPrototypeScope | AtCatchScope | ObjCMethodScope)) == 0) @@ -47,9 +48,10 @@ void Scope::Init(Scope *parent, unsigned flags) { Depth = 0; PrototypeDepth = 0; PrototypeIndex = 0; - MSLocalManglingParent = FnParent = BlockParent = nullptr; + MSLastManglingParent = FnParent = BlockParent = nullptr; TemplateParamParent = nullptr; - MSLocalManglingNumber = 1; + MSLastManglingNumber = 1; + MSCurManglingNumber = 1; } // If this scope is a function or contains breaks/continues, remember it. @@ -57,8 +59,9 @@ void Scope::Init(Scope *parent, unsigned flags) { // The MS mangler uses the number of scopes that can hold declarations as // part of an external name. if (Flags & (ClassScope | FnScope)) { - MSLocalManglingNumber = getMSLocalManglingNumber(); - MSLocalManglingParent = this; + MSLastManglingNumber = getMSLastManglingNumber(); + MSLastManglingParent = this; + MSCurManglingNumber = 1; } if (flags & BreakScope) BreakParent = this; if (flags & ContinueScope) ContinueParent = this; @@ -78,7 +81,7 @@ void Scope::Init(Scope *parent, unsigned flags) { else if ((flags & EnumScope)) ; // Don't increment for enum scopes. else - incrementMSLocalManglingNumber(); + incrementMSManglingNumber(); } DeclsInScope.clear(); @@ -185,6 +188,9 @@ void Scope::dumpImpl(raw_ostream &OS) const { } else if (Flags & SEHTryScope) { OS << "SEHTryScope"; Flags &= ~SEHTryScope; + } else if (Flags & SEHExceptScope) { + OS << "SEHExceptScope"; + Flags &= ~SEHExceptScope; } else if (Flags & OpenMPDirectiveScope) { OS << "OpenMPDirectiveScope"; Flags &= ~OpenMPDirectiveScope; @@ -206,12 +212,13 @@ void Scope::dumpImpl(raw_ostream &OS) const { OS << "Parent: (clang::Scope*)" << Parent << '\n'; OS << "Depth: " << Depth << '\n'; - OS << "MSLocalManglingNumber: " << getMSLocalManglingNumber() << '\n'; + OS << "MSLastManglingNumber: " << getMSLastManglingNumber() << '\n'; + OS << "MSCurManglingNumber: " << getMSCurManglingNumber() << '\n'; if (const DeclContext *DC = getEntity()) OS << "Entity : (clang::DeclContext*)" << DC << '\n'; if (NRVO.getInt()) - OS << "NRVO not allowed"; + OS << "NRVO not allowed\n"; else if (NRVO.getPointer()) OS << "NRVO candidate : (clang::VarDecl*)" << NRVO.getPointer() << '\n'; } diff --git a/contrib/llvm/tools/clang/lib/Sema/ScopeInfo.cpp b/contrib/llvm/tools/clang/lib/Sema/ScopeInfo.cpp index 63ef3b2..f80eadf 100644 --- a/contrib/llvm/tools/clang/lib/Sema/ScopeInfo.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/ScopeInfo.cpp @@ -33,6 +33,8 @@ void FunctionScopeInfo::Clear() { ObjCWarnForNoDesignatedInitChain = false; ObjCIsSecondaryInit = false; ObjCWarnForNoInitDelegation = false; + FirstCXXTryLoc = SourceLocation(); + FirstSEHTryLoc = SourceLocation(); SwitchStack.clear(); Returns.clear(); diff --git a/contrib/llvm/tools/clang/lib/Sema/Sema.cpp b/contrib/llvm/tools/clang/lib/Sema/Sema.cpp index b9aaf16..50edc42 100644 --- a/contrib/llvm/tools/clang/lib/Sema/Sema.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/Sema.cpp @@ -99,10 +99,11 @@ Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer, GlobalNewDeleteDeclared(false), TUKind(TUKind), NumSFINAEErrors(0), + CachedFakeTopLevelModule(nullptr), AccessCheckingSFINAE(false), InNonInstantiationSFINAEContext(false), NonInstantiationEntries(0), ArgumentPackSubstitutionIndex(-1), CurrentInstantiationScope(nullptr), DisableTypoCorrection(false), - TyposCorrected(0), AnalysisWarnings(*this), + TyposCorrected(0), 
AnalysisWarnings(*this), ThreadSafetyDeclCache(nullptr), VarDataSharingAttributesStack(nullptr), CurScope(nullptr), Ident_super(nullptr), Ident___float128(nullptr) { @@ -122,9 +123,7 @@ Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer, PP.getDiagnostics().SetArgToStringFn(&FormatASTNodeDiagnosticArgument, &Context); - ExprEvalContexts.push_back( - ExpressionEvaluationContextRecord(PotentiallyEvaluated, 0, - false, nullptr, false)); + ExprEvalContexts.emplace_back(PotentiallyEvaluated, 0, false, nullptr, false); FunctionScopes.push_back(new FunctionScopeInfo(Diags)); @@ -195,8 +194,9 @@ void Sema::Initialize() { } // Initialize Microsoft "predefined C++ types". - if (PP.getLangOpts().MSVCCompat && PP.getLangOpts().CPlusPlus) { - if (IdResolver.begin(&Context.Idents.get("type_info")) == IdResolver.end()) + if (PP.getLangOpts().MSVCCompat) { + if (PP.getLangOpts().CPlusPlus && + IdResolver.begin(&Context.Idents.get("type_info")) == IdResolver.end()) PushOnScopeChains(Context.buildImplicitRecord("type_info", TTK_Class), TUScope); @@ -213,6 +213,29 @@ void Sema::Initialize() { addImplicitTypedef("image3d_t", Context.OCLImage3dTy); addImplicitTypedef("sampler_t", Context.OCLSamplerTy); addImplicitTypedef("event_t", Context.OCLEventTy); + if (getLangOpts().OpenCLVersion >= 200) { + addImplicitTypedef("atomic_int", Context.getAtomicType(Context.IntTy)); + addImplicitTypedef("atomic_uint", + Context.getAtomicType(Context.UnsignedIntTy)); + addImplicitTypedef("atomic_long", Context.getAtomicType(Context.LongTy)); + addImplicitTypedef("atomic_ulong", + Context.getAtomicType(Context.UnsignedLongTy)); + addImplicitTypedef("atomic_float", + Context.getAtomicType(Context.FloatTy)); + addImplicitTypedef("atomic_double", + Context.getAtomicType(Context.DoubleTy)); + // OpenCLC v2.0, s6.13.11.6 requires that atomic_flag is implemented as + // 32-bit integer and OpenCLC v2.0, s6.1.1 int is always 32-bit wide. + addImplicitTypedef("atomic_flag", Context.getAtomicType(Context.IntTy)); + addImplicitTypedef("atomic_intptr_t", + Context.getAtomicType(Context.getIntPtrType())); + addImplicitTypedef("atomic_uintptr_t", + Context.getAtomicType(Context.getUIntPtrType())); + addImplicitTypedef("atomic_size_t", + Context.getAtomicType(Context.getSizeType())); + addImplicitTypedef("atomic_ptrdiff_t", + Context.getAtomicType(Context.getPointerDiffType())); + } } DeclarationName BuiltinVaList = &Context.Idents.get("__builtin_va_list"); @@ -243,6 +266,8 @@ Sema::~Sema() { if (isMultiplexExternalSource) delete ExternalSource; + threadSafety::threadSafetyCleanup(ThreadSafetyDeclCache); + // Destroys data sharing attributes stack for OpenMP DestroyDataSharingAttributesStack(); @@ -337,18 +362,6 @@ ExprResult Sema::ImpCastExprToType(Expr *E, QualType Ty, if (ExprTy == TypeTy) return E; - // If this is a derived-to-base cast to a through a virtual base, we - // need a vtable. 
- if (Kind == CK_DerivedToBase && - BasePathInvolvesVirtualBase(*BasePath)) { - QualType T = E->getType(); - if (const PointerType *Pointer = T->getAs<PointerType>()) - T = Pointer->getPointeeType(); - if (const RecordType *RecordTy = T->getAs<RecordType>()) - MarkVTableUsed(E->getLocStart(), - cast<CXXRecordDecl>(RecordTy->getDecl())); - } - if (ImplicitCastExpr *ImpCast = dyn_cast<ImplicitCastExpr>(E)) { if (ImpCast->getCastKind() == Kind && (!BasePath || BasePath->empty())) { ImpCast->setType(Ty); @@ -512,14 +525,8 @@ void Sema::LoadExternalWeakUndeclaredIdentifiers() { SmallVector<std::pair<IdentifierInfo *, WeakInfo>, 4> WeakIDs; ExternalSource->ReadWeakUndeclaredIdentifiers(WeakIDs); - for (unsigned I = 0, N = WeakIDs.size(); I != N; ++I) { - llvm::DenseMap<IdentifierInfo*,WeakInfo>::iterator Pos - = WeakUndeclaredIdentifiers.find(WeakIDs[I].first); - if (Pos != WeakUndeclaredIdentifiers.end()) - continue; - - WeakUndeclaredIdentifiers.insert(WeakIDs[I]); - } + for (auto &WeakID : WeakIDs) + WeakUndeclaredIdentifiers.insert(WeakID); } @@ -632,22 +639,6 @@ void Sema::ActOnEndOfTranslationUnit() { if (TUKind != TU_Prefix) { DiagnoseUseOfUnimplementedSelectors(); - // If any dynamic classes have their key function defined within - // this translation unit, then those vtables are considered "used" and must - // be emitted. - for (DynamicClassesType::iterator I = DynamicClasses.begin(ExternalSource), - E = DynamicClasses.end(); - I != E; ++I) { - assert(!(*I)->isDependentType() && - "Should not see dependent types here!"); - if (const CXXMethodDecl *KeyFunction = - Context.getCurrentKeyFunction(*I)) { - const FunctionDecl *Definition = nullptr; - if (KeyFunction->hasBody(Definition)) - MarkVTableUsed(Definition->getLocation(), *I, true); - } - } - // If DefinedUsedVTables ends up marking any virtual member functions it // might lead to more pending template instantiations, which we then need // to instantiate. @@ -679,6 +670,8 @@ void Sema::ActOnEndOfTranslationUnit() { // All delayed member exception specs should be checked or we end up accepting // incompatible declarations. + // FIXME: This is wrong for TUKind == TU_Prefix. In that case, we need to + // write out the lists to the AST file (if any). assert(DelayedDefaultedMemberExceptionSpecs.empty()); assert(DelayedExceptionSpecChecks.empty()); @@ -696,16 +689,13 @@ void Sema::ActOnEndOfTranslationUnit() { } // Check for #pragma weak identifiers that were never declared - // FIXME: This will cause diagnostics to be emitted in a non-determinstic - // order! Iterating over a densemap like this is bad. 
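
The deleted FIXME above is addressed in this commit by moving order-sensitive containers from llvm::DenseMap to llvm::MapVector (as in the ReadLateParsedTemplates signature change earlier; the declaration of WeakUndeclaredIdentifiers itself lives in Sema.h, outside this diff). DenseMap iterates in hash order, which can vary from run to run; MapVector replays insertion order, keeping diagnostic output deterministic. A small illustration (the keys are arbitrary):

    #include "llvm/ADT/DenseMap.h"
    #include "llvm/ADT/MapVector.h"

    void orderDemo() {
      llvm::DenseMap<int, const char *> Hashed;
      llvm::MapVector<int, const char *> Ordered;
      for (int Key : {3, 1, 2}) {
        Hashed[Key] = "v";
        Ordered[Key] = "v";
      }
      for (auto &E : Hashed) { (void)E; }   // hash order: unspecified
      for (auto &E : Ordered) { (void)E; }  // insertion order: 3, 1, 2
    }
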
LoadExternalWeakUndeclaredIdentifiers(); - for (llvm::DenseMap<IdentifierInfo*,WeakInfo>::iterator - I = WeakUndeclaredIdentifiers.begin(), - E = WeakUndeclaredIdentifiers.end(); I != E; ++I) { - if (I->second.getUsed()) continue; + for (auto WeakID : WeakUndeclaredIdentifiers) { + if (WeakID.second.getUsed()) + continue; - Diag(I->second.getLocation(), diag::warn_weak_identifier_undeclared) - << I->first; + Diag(WeakID.second.getLocation(), diag::warn_weak_identifier_undeclared) + << WeakID.first; } if (LangOpts.CPlusPlus11 && @@ -871,6 +861,17 @@ void Sema::ActOnEndOfTranslationUnit() { } } + if (!Diags.isIgnored(diag::warn_mismatched_delete_new, SourceLocation())) { + if (ExternalSource) + ExternalSource->ReadMismatchingDeleteExpressions(DeleteExprs); + for (const auto &DeletedFieldInfo : DeleteExprs) { + for (const auto &DeleteExprLoc : DeletedFieldInfo.second) { + AnalyzeDeleteExprMismatch(DeletedFieldInfo.first, DeleteExprLoc.first, + DeleteExprLoc.second); + } + } + } + // Check we've noticed that we're no longer parsing the initializer for every // variable. If we miss cases, then at best we have a performance issue and // at worst a rejects-valid bug. @@ -1230,6 +1231,9 @@ void ExternalSemaSource::ReadUndefinedButUsed( llvm::DenseMap<NamedDecl *, SourceLocation> &Undefined) { } +void ExternalSemaSource::ReadMismatchingDeleteExpressions(llvm::MapVector< + FieldDecl *, llvm::SmallVector<std::pair<SourceLocation, bool>, 4>> &) {} + void PrettyDeclStackTraceEntry::print(raw_ostream &OS) const { SourceLocation Loc = this->Loc; if (!Loc.isValid() && TheDecl) Loc = TheDecl->getLocation(); @@ -1478,3 +1482,8 @@ CapturedRegionScopeInfo *Sema::getCurCapturedRegion() { return dyn_cast<CapturedRegionScopeInfo>(FunctionScopes.back()); } + +const llvm::MapVector<FieldDecl *, Sema::DeleteLocs> & +Sema::getMismatchingDeleteExpressions() const { + return DeleteExprs; +} diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaAccess.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaAccess.cpp index 37240c2..0e973cc 100644 --- a/contrib/llvm/tools/clang/lib/Sema/SemaAccess.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/SemaAccess.cpp @@ -1462,7 +1462,7 @@ static Sema::AccessResult CheckAccess(Sema &S, SourceLocation Loc, case AR_inaccessible: return Sema::AR_inaccessible; case AR_dependent: return Sema::AR_dependent; } - llvm_unreachable("falling off end"); + llvm_unreachable("invalid access result"); } void Sema::HandleDelayedAccessCheck(DelayedDiagnostic &DD, Decl *D) { diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaAttr.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaAttr.cpp index 7629797..5a29bad 100644 --- a/contrib/llvm/tools/clang/lib/Sema/SemaAttr.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/SemaAttr.cpp @@ -131,7 +131,7 @@ void Sema::AddAlignmentAttributesForRecord(RecordDecl *RD) { void Sema::AddMsStructLayoutForRecord(RecordDecl *RD) { if (MSStructPragmaOn) - RD->addAttr(MsStructAttr::CreateImplicit(Context)); + RD->addAttr(MSStructAttr::CreateImplicit(Context)); // FIXME: We should merge AddAlignmentAttributesForRecord with // AddMsStructLayoutForRecord into AddPragmaAttributesForRecord, which takes @@ -422,6 +422,9 @@ void Sema::ActOnPragmaMSSeg(SourceLocation PragmaLocation, if (Action & PSK_Pop && Stack->Stack.empty()) Diag(PragmaLocation, diag::warn_pragma_pop_failed) << PragmaName << "stack empty"; + if (SegmentName && + !checkSectionName(SegmentName->getLocStart(), SegmentName->getString())) + return; Stack->Act(PragmaLocation, Action, StackSlotLabel, SegmentName); } diff --git 
a/contrib/llvm/tools/clang/lib/Sema/SemaCUDA.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaCUDA.cpp index 64222fb..5973500 100644 --- a/contrib/llvm/tools/clang/lib/Sema/SemaCUDA.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/SemaCUDA.cpp @@ -62,6 +62,11 @@ Sema::CUDAFunctionTarget Sema::IdentifyCUDATarget(const FunctionDecl *D) { bool Sema::CheckCUDATarget(const FunctionDecl *Caller, const FunctionDecl *Callee) { + // The CUDADisableTargetCallChecks option short-circuits this check: we + // assume all cross-target calls are valid. + if (getLangOpts().CUDADisableTargetCallChecks) + return false; + CUDAFunctionTarget CallerTarget = IdentifyCUDATarget(Caller), CalleeTarget = IdentifyCUDATarget(Callee); @@ -92,9 +97,21 @@ bool Sema::CheckCUDATarget(const FunctionDecl *Caller, if (Caller->isImplicit()) return false; bool InDeviceMode = getLangOpts().CUDAIsDevice; - if ((InDeviceMode && CalleeTarget != CFT_Device) || - (!InDeviceMode && CalleeTarget != CFT_Host)) + if (!InDeviceMode && CalleeTarget != CFT_Host) + return true; + if (InDeviceMode && CalleeTarget != CFT_Device) { + // Allow host device functions to call host functions if explicitly + // requested. + if (CalleeTarget == CFT_Host && + getLangOpts().CUDAAllowHostCallsFromHostDevice) { + Diag(Caller->getLocation(), + diag::warn_host_calls_from_host_device) + << Callee->getNameAsString() << Caller->getNameAsString(); + return false; + } + return true; + } } return false; diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaCXXScopeSpec.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaCXXScopeSpec.cpp index 3e56e67..9e146ed 100644 --- a/contrib/llvm/tools/clang/lib/Sema/SemaCXXScopeSpec.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/SemaCXXScopeSpec.cpp @@ -218,6 +218,7 @@ bool Sema::RequireCompleteDeclContext(CXXScopeSpec &SS, // Fixed enum types are complete, but they aren't valid as scopes // until we see a definition, so awkwardly pull out this special // case. + // FIXME: The definition might not be visible; complain if it is not. const EnumType *enumType = dyn_cast_or_null<EnumType>(tagType); if (!enumType || enumType->getDecl()->isCompleteDefinition()) return false; @@ -282,7 +283,11 @@ bool Sema::ActOnSuperScopeSpecifier(SourceLocation SuperLoc, /// \brief Determines whether the given declaration is a valid acceptable /// result for name lookup of a nested-name-specifier. -bool Sema::isAcceptableNestedNameSpecifier(const NamedDecl *SD) { +/// \param SD Declaration checked for nested-name-specifier. +/// \param IsExtension If not null and the declaration is accepted as an +/// extension, the variable it points to is set to true.
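
The IsExtension out-parameter feeds the new ext_nested_name_spec_is_enum path in BuildCXXNestedNameSpecifier below: an unscoped enumeration used as a nested-name-specifier is valid C++11, and with this change is accepted in C++03 with a warning instead of a hard error. The construct in question:

    enum Color { Red, Green };

    // C++11: fine.  C++03 after this change: accepted, with
    // ext_nested_name_spec_is_enum instead of an error.
    int First = Color::Red;
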
+bool Sema::isAcceptableNestedNameSpecifier(const NamedDecl *SD, + bool *IsExtension) { if (!SD) return false; @@ -298,14 +303,23 @@ bool Sema::isAcceptableNestedNameSpecifier(const NamedDecl *SD) { QualType T = Context.getTypeDeclType(cast<TypeDecl>(SD)); if (T->isDependentType()) return true; - else if (const TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(SD)) { - if (TD->getUnderlyingType()->isRecordType() || - (Context.getLangOpts().CPlusPlus11 && - TD->getUnderlyingType()->isEnumeralType())) + if (const TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(SD)) { + if (TD->getUnderlyingType()->isRecordType()) return true; - } else if (isa<RecordDecl>(SD) || - (Context.getLangOpts().CPlusPlus11 && isa<EnumDecl>(SD))) + if (TD->getUnderlyingType()->isEnumeralType()) { + if (Context.getLangOpts().CPlusPlus11) + return true; + if (IsExtension) + *IsExtension = true; + } + } else if (isa<RecordDecl>(SD)) { return true; + } else if (isa<EnumDecl>(SD)) { + if (Context.getLangOpts().CPlusPlus11) + return true; + if (IsExtension) + *IsExtension = true; + } return false; } @@ -599,7 +613,13 @@ bool Sema::BuildCXXNestedNameSpecifier(Scope *S, } NamedDecl *SD = Found.getAsSingle<NamedDecl>(); - if (isAcceptableNestedNameSpecifier(SD)) { + bool IsExtension = false; + bool AcceptSpec = isAcceptableNestedNameSpecifier(SD, &IsExtension); + if (!AcceptSpec && IsExtension) { + AcceptSpec = true; + Diag(IdentifierLoc, diag::ext_nested_name_spec_is_enum); + } + if (AcceptSpec) { if (!ObjectType.isNull() && !ObjectTypeSearchedInScope && !getLangOpts().CPlusPlus11) { // C++03 [basic.lookup.classref]p4: diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaCast.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaCast.cpp index a4c2d9b..091e779 100644 --- a/contrib/llvm/tools/clang/lib/Sema/SemaCast.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/SemaCast.cpp @@ -389,6 +389,33 @@ static void diagnoseBadCast(Sema &S, unsigned msg, CastType castType, S.Diag(opRange.getBegin(), msg) << castType << src->getType() << destType << opRange << src->getSourceRange(); + + // Detect if both types are (ptr to) class, and note any incompleteness. + int DifferentPtrness = 0; + QualType From = destType; + if (auto Ptr = From->getAs<PointerType>()) { + From = Ptr->getPointeeType(); + DifferentPtrness++; + } + QualType To = src->getType(); + if (auto Ptr = To->getAs<PointerType>()) { + To = Ptr->getPointeeType(); + DifferentPtrness--; + } + if (!DifferentPtrness) { + auto RecFrom = From->getAs<RecordType>(); + auto RecTo = To->getAs<RecordType>(); + if (RecFrom && RecTo) { + auto DeclFrom = RecFrom->getAsCXXRecordDecl(); + if (!DeclFrom->isCompleteDefinition()) + S.Diag(DeclFrom->getLocation(), diag::note_type_incomplete) + << DeclFrom->getDeclName(); + auto DeclTo = RecTo->getAsCXXRecordDecl(); + if (!DeclTo->isCompleteDefinition()) + S.Diag(DeclTo->getLocation(), diag::note_type_incomplete) + << DeclTo->getDeclName(); + } + } } /// UnwrapDissimilarPointerTypes - Like Sema::UnwrapSimilarPointerTypes, @@ -665,12 +692,6 @@ void CastOperation::CheckDynamicCast() { } Kind = CK_DerivedToBase; - - // If we are casting to or through a virtual base class, we need a - // vtable. 
- if (Self.BasePathInvolvesVirtualBase(BasePath)) - Self.MarkVTableUsed(OpRange.getBegin(), - cast<CXXRecordDecl>(SrcRecord->getDecl())); return; } @@ -682,8 +703,6 @@ void CastOperation::CheckDynamicCast() { << SrcPointee.getUnqualifiedType() << SrcExpr.get()->getSourceRange(); SrcExpr = ExprError(); } - Self.MarkVTableUsed(OpRange.getBegin(), - cast<CXXRecordDecl>(SrcRecord->getDecl())); // dynamic_cast is not available with -fno-rtti. // As an exception, dynamic_cast to void* is available because it doesn't @@ -1087,6 +1106,14 @@ static TryCastResult TryStaticCast(Sema &Self, ExprResult &SrcExpr, if (!CStyle && Self.CheckTollFreeBridgeStaticCast(DestType, SrcExpr.get(), Kind)) return TC_Success; + + // See if it looks like the user is trying to convert between + // related record types, and select a better diagnostic if so. + if (auto SrcPointer = SrcType->getAs<PointerType>()) + if (auto DestPointer = DestType->getAs<PointerType>()) + if (SrcPointer->getPointeeType()->getAs<RecordType>() && + DestPointer->getPointeeType()->getAs<RecordType>()) + msg = diag::err_bad_cxx_cast_unrelated_class; // We tried everything. Everything! Nothing works! :-( return TC_NotApplicable; @@ -1790,8 +1817,8 @@ static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr, // can be explicitly converted to an rvalue of type "pointer to member // of Y of type T2" if T1 and T2 are both function types or both object // types. - if (DestMemPtr->getPointeeType()->isFunctionType() != - SrcMemPtr->getPointeeType()->isFunctionType()) + if (DestMemPtr->isMemberFunctionPointer() != + SrcMemPtr->isMemberFunctionPointer()) return TC_NotApplicable; // C++ 5.2.10p2: The reinterpret_cast operator shall not cast away diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaChecking.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaChecking.cpp index 6ab957e..23a6fc3 100644 --- a/contrib/llvm/tools/clang/lib/Sema/SemaChecking.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/SemaChecking.cpp @@ -184,7 +184,7 @@ static bool SemaBuiltinCallWithStaticChain(Sema &S, CallExpr *BuiltinCall) { return true; } - QualType ReturnTy = CE->getCallReturnType(); + QualType ReturnTy = CE->getCallReturnType(S.Context); QualType ArgTys[2] = { ReturnTy, ChainResult.get()->getType() }; QualType BuiltinTy = S.Context.getFunctionType( ReturnTy, ArgTys, FunctionProtoType::ExtProtoInfo()); @@ -202,6 +202,28 @@ static bool SemaBuiltinCallWithStaticChain(Sema &S, CallExpr *BuiltinCall) { return false; } +static bool SemaBuiltinSEHScopeCheck(Sema &SemaRef, CallExpr *TheCall, + Scope::ScopeFlags NeededScopeFlags, + unsigned DiagID) { + // Scopes aren't available during instantiation. Fortunately, builtin + // functions cannot be template args so they cannot be formed through template + // instantiation. Therefore checking once during the parse is sufficient. 
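
SemaBuiltinSEHScopeCheck walks the parse-time scope chain so that the SEH bookkeeping intrinsics are only accepted where they have a defined meaning. A sketch of what the two checks below accept and reject, assuming MSVC mode (the exact filter-scope flag handling lives in Scope; the function body is illustrative):

    unsigned long filterAndHandle(volatile int *p) {
      // _exception_code() here would hit err_seh___except_block.
      __try {
        *p = 1;
        return 0;
      } __except (_exception_info() != 0) {  // filter: __exception_info OK
        return _exception_code();            // __except block: code OK
      }
    }
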
+ if (!SemaRef.ActiveTemplateInstantiations.empty()) + return false; + + Scope *S = SemaRef.getCurScope(); + while (S && !S->isSEHExceptScope()) + S = S->getParent(); + if (!S || !(S->getFlags() & NeededScopeFlags)) { + auto *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); + SemaRef.Diag(TheCall->getExprLoc(), DiagID) + << DRE->getDecl()->getIdentifier(); + return true; + } + + return false; +} + ExprResult Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, CallExpr *TheCall) { @@ -301,6 +323,11 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, if (SemaBuiltinSetjmp(TheCall)) return ExprError(); break; + case Builtin::BI_setjmp: + case Builtin::BI_setjmpex: + if (checkArgCount(*this, TheCall, 1)) + return true; + break; case Builtin::BI__builtin_classify_type: if (checkArgCount(*this, TheCall, 1)) return true; @@ -465,6 +492,35 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, if (SemaBuiltinCallWithStaticChain(*this, TheCall)) return ExprError(); break; + + case Builtin::BI__exception_code: + case Builtin::BI_exception_code: { + if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHExceptScope, + diag::err_seh___except_block)) + return ExprError(); + break; + } + case Builtin::BI__exception_info: + case Builtin::BI_exception_info: { + if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHFilterScope, + diag::err_seh___except_filter)) + return ExprError(); + break; + } + + case Builtin::BI__GetExceptionInfo: + if (checkArgCount(*this, TheCall, 1)) + return ExprError(); + + if (CheckCXXThrowOperand( + TheCall->getLocStart(), + Context.getExceptionObjectType(FDecl->getParamDecl(0)->getType()), + TheCall)) + return ExprError(); + + TheCall->setType(Context.VoidPtrTy); + break; + } // Since the target specific builtins for each arch overlap, only check those @@ -490,11 +546,21 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, if (CheckMipsBuiltinFunctionCall(BuiltinID, TheCall)) return ExprError(); break; + case llvm::Triple::systemz: + if (CheckSystemZBuiltinFunctionCall(BuiltinID, TheCall)) + return ExprError(); + break; case llvm::Triple::x86: case llvm::Triple::x86_64: if (CheckX86BuiltinFunctionCall(BuiltinID, TheCall)) return ExprError(); break; + case llvm::Triple::ppc: + case llvm::Triple::ppc64: + case llvm::Triple::ppc64le: + if (CheckPPCBuiltinFunctionCall(BuiltinID, TheCall)) + return ExprError(); + break; default: break; } @@ -557,7 +623,10 @@ static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context, case NeonTypeFlags::Poly16: return IsPolyUnsigned ? 
Context.UnsignedShortTy : Context.ShortTy; case NeonTypeFlags::Poly64: - return Context.UnsignedLongTy; + if (IsInt64Long) + return Context.UnsignedLongTy; + else + return Context.UnsignedLongLongTy; case NeonTypeFlags::Poly128: break; case NeonTypeFlags::Float16: @@ -839,15 +908,161 @@ bool Sema::CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { return SemaBuiltinConstantArgRange(TheCall, i, l, u); } +bool Sema::CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { + unsigned i = 0, l = 0, u = 0; + bool Is64BitBltin = BuiltinID == PPC::BI__builtin_divde || + BuiltinID == PPC::BI__builtin_divdeu || + BuiltinID == PPC::BI__builtin_bpermd; + bool IsTarget64Bit = Context.getTargetInfo() + .getTypeWidth(Context + .getTargetInfo() + .getIntPtrType()) == 64; + bool IsBltinExtDiv = BuiltinID == PPC::BI__builtin_divwe || + BuiltinID == PPC::BI__builtin_divweu || + BuiltinID == PPC::BI__builtin_divde || + BuiltinID == PPC::BI__builtin_divdeu; + + if (Is64BitBltin && !IsTarget64Bit) + return Diag(TheCall->getLocStart(), diag::err_64_bit_builtin_32_bit_tgt) + << TheCall->getSourceRange(); + + if ((IsBltinExtDiv && !Context.getTargetInfo().hasFeature("extdiv")) || + (BuiltinID == PPC::BI__builtin_bpermd && + !Context.getTargetInfo().hasFeature("bpermd"))) + return Diag(TheCall->getLocStart(), diag::err_ppc_builtin_only_on_pwr7) + << TheCall->getSourceRange(); + + switch (BuiltinID) { + default: return false; + case PPC::BI__builtin_altivec_crypto_vshasigmaw: + case PPC::BI__builtin_altivec_crypto_vshasigmad: + return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || + SemaBuiltinConstantArgRange(TheCall, 2, 0, 15); + case PPC::BI__builtin_tbegin: + case PPC::BI__builtin_tend: i = 0; l = 0; u = 1; break; + case PPC::BI__builtin_tsr: i = 0; l = 0; u = 7; break; + case PPC::BI__builtin_tabortwc: + case PPC::BI__builtin_tabortdc: i = 0; l = 0; u = 31; break; + case PPC::BI__builtin_tabortwci: + case PPC::BI__builtin_tabortdci: + return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31) || + SemaBuiltinConstantArgRange(TheCall, 2, 0, 31); + } + return SemaBuiltinConstantArgRange(TheCall, i, l, u); +} + +bool Sema::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, + CallExpr *TheCall) { + if (BuiltinID == SystemZ::BI__builtin_tabort) { + Expr *Arg = TheCall->getArg(0); + llvm::APSInt AbortCode(32); + if (Arg->isIntegerConstantExpr(AbortCode, Context) && + AbortCode.getSExtValue() >= 0 && AbortCode.getSExtValue() < 256) + return Diag(Arg->getLocStart(), diag::err_systemz_invalid_tabort_code) + << Arg->getSourceRange(); + } + + // For intrinsics which take an immediate value as part of the instruction, + // range check them here. 
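
Each i/l/u triple below is handed to SemaBuiltinConstantArgRange, which requires argument i of the builtin call to be an integer constant expression in [l, u]. A simplified sketch of that check (hypothetical free-function form; the real routine is a Sema member built on SemaBuiltinConstantArg):

    static bool checkImmediateArgRange(Sema &S, CallExpr *Call,
                                       unsigned ArgNum, int Low, int High) {
      llvm::APSInt Value;
      Expr *Arg = Call->getArg(ArgNum);
      // The immediate must fold to an integer constant expression...
      if (!Arg->isIntegerConstantExpr(Value, S.Context))
        return S.Diag(Arg->getLocStart(), diag::err_constant_integer_arg_type)
               << Arg->getSourceRange();
      // ...and lie within the range the instruction encoding allows.
      if (Value.getSExtValue() < Low || Value.getSExtValue() > High)
        return S.Diag(Arg->getLocStart(), diag::err_argument_invalid_range)
               << Low << High << Arg->getSourceRange();
      return false; // in range: no diagnostic
    }
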
+ unsigned i = 0, l = 0, u = 0; + switch (BuiltinID) { + default: return false; + case SystemZ::BI__builtin_s390_lcbb: i = 1; l = 0; u = 15; break; + case SystemZ::BI__builtin_s390_verimb: + case SystemZ::BI__builtin_s390_verimh: + case SystemZ::BI__builtin_s390_verimf: + case SystemZ::BI__builtin_s390_verimg: i = 3; l = 0; u = 255; break; + case SystemZ::BI__builtin_s390_vfaeb: + case SystemZ::BI__builtin_s390_vfaeh: + case SystemZ::BI__builtin_s390_vfaef: + case SystemZ::BI__builtin_s390_vfaebs: + case SystemZ::BI__builtin_s390_vfaehs: + case SystemZ::BI__builtin_s390_vfaefs: + case SystemZ::BI__builtin_s390_vfaezb: + case SystemZ::BI__builtin_s390_vfaezh: + case SystemZ::BI__builtin_s390_vfaezf: + case SystemZ::BI__builtin_s390_vfaezbs: + case SystemZ::BI__builtin_s390_vfaezhs: + case SystemZ::BI__builtin_s390_vfaezfs: i = 2; l = 0; u = 15; break; + case SystemZ::BI__builtin_s390_vfidb: + return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15) || + SemaBuiltinConstantArgRange(TheCall, 2, 0, 15); + case SystemZ::BI__builtin_s390_vftcidb: i = 1; l = 0; u = 4095; break; + case SystemZ::BI__builtin_s390_vlbb: i = 1; l = 0; u = 15; break; + case SystemZ::BI__builtin_s390_vpdi: i = 2; l = 0; u = 15; break; + case SystemZ::BI__builtin_s390_vsldb: i = 2; l = 0; u = 15; break; + case SystemZ::BI__builtin_s390_vstrcb: + case SystemZ::BI__builtin_s390_vstrch: + case SystemZ::BI__builtin_s390_vstrcf: + case SystemZ::BI__builtin_s390_vstrczb: + case SystemZ::BI__builtin_s390_vstrczh: + case SystemZ::BI__builtin_s390_vstrczf: + case SystemZ::BI__builtin_s390_vstrcbs: + case SystemZ::BI__builtin_s390_vstrchs: + case SystemZ::BI__builtin_s390_vstrcfs: + case SystemZ::BI__builtin_s390_vstrczbs: + case SystemZ::BI__builtin_s390_vstrczhs: + case SystemZ::BI__builtin_s390_vstrczfs: i = 3; l = 0; u = 15; break; + } + return SemaBuiltinConstantArgRange(TheCall, i, l, u); +} + bool Sema::CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { unsigned i = 0, l = 0, u = 0; switch (BuiltinID) { default: return false; case X86::BI_mm_prefetch: i = 1; l = 0; u = 3; break; + case X86::BI__builtin_ia32_sha1rnds4: i = 2, l = 0; u = 3; break; + case X86::BI__builtin_ia32_vpermil2pd: + case X86::BI__builtin_ia32_vpermil2pd256: + case X86::BI__builtin_ia32_vpermil2ps: + case X86::BI__builtin_ia32_vpermil2ps256: i = 3, l = 0; u = 3; break; + case X86::BI__builtin_ia32_cmpb128_mask: + case X86::BI__builtin_ia32_cmpw128_mask: + case X86::BI__builtin_ia32_cmpd128_mask: + case X86::BI__builtin_ia32_cmpq128_mask: + case X86::BI__builtin_ia32_cmpb256_mask: + case X86::BI__builtin_ia32_cmpw256_mask: + case X86::BI__builtin_ia32_cmpd256_mask: + case X86::BI__builtin_ia32_cmpq256_mask: + case X86::BI__builtin_ia32_cmpb512_mask: + case X86::BI__builtin_ia32_cmpw512_mask: + case X86::BI__builtin_ia32_cmpd512_mask: + case X86::BI__builtin_ia32_cmpq512_mask: + case X86::BI__builtin_ia32_ucmpb128_mask: + case X86::BI__builtin_ia32_ucmpw128_mask: + case X86::BI__builtin_ia32_ucmpd128_mask: + case X86::BI__builtin_ia32_ucmpq128_mask: + case X86::BI__builtin_ia32_ucmpb256_mask: + case X86::BI__builtin_ia32_ucmpw256_mask: + case X86::BI__builtin_ia32_ucmpd256_mask: + case X86::BI__builtin_ia32_ucmpq256_mask: + case X86::BI__builtin_ia32_ucmpb512_mask: + case X86::BI__builtin_ia32_ucmpw512_mask: + case X86::BI__builtin_ia32_ucmpd512_mask: + case X86::BI__builtin_ia32_ucmpq512_mask: i = 2; l = 0; u = 7; break; + case X86::BI__builtin_ia32_roundps: + case X86::BI__builtin_ia32_roundpd: + case X86::BI__builtin_ia32_roundps256: + 
case X86::BI__builtin_ia32_roundpd256: i = 1, l = 0; u = 15; break; + case X86::BI__builtin_ia32_roundss: + case X86::BI__builtin_ia32_roundsd: i = 2, l = 0; u = 15; break; case X86::BI__builtin_ia32_cmpps: case X86::BI__builtin_ia32_cmpss: case X86::BI__builtin_ia32_cmppd: - case X86::BI__builtin_ia32_cmpsd: i = 2; l = 0; u = 31; break; + case X86::BI__builtin_ia32_cmpsd: + case X86::BI__builtin_ia32_cmpps256: + case X86::BI__builtin_ia32_cmppd256: + case X86::BI__builtin_ia32_cmpps512_mask: + case X86::BI__builtin_ia32_cmppd512_mask: i = 2; l = 0; u = 31; break; + case X86::BI__builtin_ia32_vpcomub: + case X86::BI__builtin_ia32_vpcomuw: + case X86::BI__builtin_ia32_vpcomud: + case X86::BI__builtin_ia32_vpcomuq: + case X86::BI__builtin_ia32_vpcomb: + case X86::BI__builtin_ia32_vpcomw: + case X86::BI__builtin_ia32_vpcomd: + case X86::BI__builtin_ia32_vpcomq: i = 2; l = 0; u = 7; break; } return SemaBuiltinConstantArgRange(TheCall, i, l, u); } @@ -1119,11 +1334,14 @@ bool Sema::CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation lbrac, bool Sema::CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, const FunctionProtoType *Proto) { - const VarDecl *V = dyn_cast<VarDecl>(NDecl); - if (!V) + QualType Ty; + if (const auto *V = dyn_cast<VarDecl>(NDecl)) + Ty = V->getType(); + else if (const auto *F = dyn_cast<FieldDecl>(NDecl)) + Ty = F->getType(); + else return false; - QualType Ty = V->getType(); if (!Ty->isBlockPointerType() && !Ty->isFunctionPointerType()) return false; @@ -1220,9 +1438,10 @@ ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult, // M is C if C is an integer, and ptrdiff_t if C is a pointer, and // the int parameters are for orderings. - assert(AtomicExpr::AO__c11_atomic_init == 0 && - AtomicExpr::AO__c11_atomic_fetch_xor + 1 == AtomicExpr::AO__atomic_load - && "need to update code for modified C11 atomics"); + static_assert(AtomicExpr::AO__c11_atomic_init == 0 && + AtomicExpr::AO__c11_atomic_fetch_xor + 1 == + AtomicExpr::AO__atomic_load, + "need to update code for modified C11 atomics"); bool IsC11 = Op >= AtomicExpr::AO__c11_atomic_init && Op <= AtomicExpr::AO__c11_atomic_fetch_xor; bool IsN = Op == AtomicExpr::AO__atomic_load_n || @@ -2039,7 +2258,7 @@ bool Sema::SemaBuiltinVAStartARM(CallExpr *Call) { if (checkBuiltinArgument(*this, Call, 0)) return true; - static const struct { + const struct { unsigned ArgNo; QualType Type; } ArgumentTypes[] = { @@ -2286,7 +2505,7 @@ bool Sema::SemaBuiltinAssume(CallExpr *TheCall) { if (Arg->isInstantiationDependent()) return false; if (Arg->HasSideEffects(Context)) - return Diag(Arg->getLocStart(), diag::warn_assume_side_effects) + Diag(Arg->getLocStart(), diag::warn_assume_side_effects) << Arg->getSourceRange() << cast<FunctionDecl>(TheCall->getCalleeDecl())->getIdentifier(); @@ -2604,6 +2823,7 @@ Sema::FormatStringType Sema::GetFormatStringType(const FormatAttr *Format) { .Case("strfmon", FST_Strfmon) .Cases("kprintf", "cmn_err", "vcmn_err", "zcmn_err", FST_Kprintf) .Case("freebsd_kprintf", FST_FreeBSDKPrintf) + .Case("os_trace", FST_OSTrace) .Default(FST_Unknown); } @@ -3074,10 +3294,7 @@ void CheckFormatHandler::EmitFormatDiagnostic(Sema &S, bool InFunctionCall, if (InFunctionCall) { const Sema::SemaDiagnosticBuilder &D = S.Diag(Loc, PDiag); D << StringRange; - for (ArrayRef<FixItHint>::iterator I = FixIt.begin(), E = FixIt.end(); - I != E; ++I) { - D << *I; - } + D << FixIt; } else { S.Diag(IsStringLocation ? 
ArgumentExpr->getExprLoc() : Loc, PDiag) << ArgumentExpr->getSourceRange(); @@ -3087,10 +3304,7 @@ void CheckFormatHandler::EmitFormatDiagnostic(Sema &S, bool InFunctionCall, diag::note_format_string_defined); Note << StringRange; - for (ArrayRef<FixItHint>::iterator I = FixIt.begin(), E = FixIt.end(); - I != E; ++I) { - Note << *I; - } + Note << FixIt; } } @@ -3600,8 +3814,11 @@ CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS, ExprTy = TET->getUnderlyingExpr()->getType(); } - if (AT.matchesType(S.Context, ExprTy)) + analyze_printf::ArgType::MatchKind match = AT.matchesType(S.Context, ExprTy); + + if (match == analyze_printf::ArgType::Match) { return true; + } // Look through argument promotions for our error message's reported type. // This includes the integral and floating promotions, but excludes array @@ -3696,16 +3913,18 @@ CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS, CharSourceRange SpecRange = getSpecifierRange(StartSpecifier, SpecifierLen); if (IntendedTy == ExprTy && !ShouldNotPrintDirectly) { + unsigned diag = diag::warn_format_conversion_argument_type_mismatch; + if (match == analyze_format_string::ArgType::NoMatchPedantic) { + diag = diag::warn_format_conversion_argument_type_mismatch_pedantic; + } // In this case, the specifier is wrong and should be changed to match // the argument. - EmitFormatDiagnostic( - S.PDiag(diag::warn_format_conversion_argument_type_mismatch) - << AT.getRepresentativeTypeName(S.Context) << IntendedTy << IsEnum - << E->getSourceRange(), - E->getLocStart(), - /*IsStringLocation*/false, - SpecRange, - FixItHint::CreateReplacement(SpecRange, os.str())); + EmitFormatDiagnostic(S.PDiag(diag) + << AT.getRepresentativeTypeName(S.Context) + << IntendedTy << IsEnum << E->getSourceRange(), + E->getLocStart(), + /*IsStringLocation*/ false, SpecRange, + FixItHint::CreateReplacement(SpecRange, os.str())); } else { // The canonical type for formatting this value is different from the @@ -3779,15 +3998,18 @@ CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS, // arguments here. switch (S.isValidVarArgType(ExprTy)) { case Sema::VAK_Valid: - case Sema::VAK_ValidInCXX11: + case Sema::VAK_ValidInCXX11: { + unsigned diag = diag::warn_format_conversion_argument_type_mismatch; + if (match == analyze_printf::ArgType::NoMatchPedantic) { + diag = diag::warn_format_conversion_argument_type_mismatch_pedantic; + } + EmitFormatDiagnostic( - S.PDiag(diag::warn_format_conversion_argument_type_mismatch) - << AT.getRepresentativeTypeName(S.Context) << ExprTy << IsEnum - << CSR - << E->getSourceRange(), - E->getLocStart(), /*IsStringLocation*/false, CSR); + S.PDiag(diag) << AT.getRepresentativeTypeName(S.Context) << ExprTy + << IsEnum << CSR << E->getSourceRange(), + E->getLocStart(), /*IsStringLocation*/ false, CSR); break; - + } case Sema::VAK_Undefined: case Sema::VAK_MSVCUndefined: EmitFormatDiagnostic( @@ -3919,13 +4141,13 @@ bool CheckScanfHandler::HandleScanfSpecifier( FixItHint::CreateRemoval(R)); } } - + if (!FS.consumesDataArgument()) { // FIXME: Technically specifying a precision or field width here // makes no sense. Worth issuing a warning at some point. return true; } - + // Consume the argument. unsigned argIndex = FS.getArgIndex(); if (argIndex < NumDataArgs) { @@ -3934,7 +4156,7 @@ bool CheckScanfHandler::HandleScanfSpecifier( // function if we encounter some other error. CoveredArgs.set(argIndex); } - + // Check the length modifier is valid with the given conversion specifier. 
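
Both the printf and scanf paths now route ArgType::MatchKind into the same diagnostic choice: Match is silent, NoMatch keeps the ordinary -Wformat warning, and NoMatchPedantic demotes near-misses to the new _pedantic variant, emitted only under -Wformat-pedantic. Typical triggers (the %p case is the motivating one):

    #include <cstdio>

    void report(char *Name, void *Addr) {
      std::printf("%p", Name);  // NoMatchPedantic: char * vs void *,
                                // diagnosed only with -Wformat-pedantic
      std::printf("%d", Addr);  // NoMatch: ordinary -Wformat warning
    }
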
if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo())) HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, @@ -3951,47 +4173,57 @@ bool CheckScanfHandler::HandleScanfSpecifier( // The remaining checks depend on the data arguments. if (HasVAListArg) return true; - + if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex)) return false; - + // Check that the argument type matches the format specifier. const Expr *Ex = getDataArg(argIndex); if (!Ex) return true; const analyze_format_string::ArgType &AT = FS.getArgType(S.Context); - if (AT.isValid() && !AT.matchesType(S.Context, Ex->getType())) { - ScanfSpecifier fixedFS = FS; - bool success = fixedFS.fixType(Ex->getType(), - Ex->IgnoreImpCasts()->getType(), - S.getLangOpts(), S.Context); - - if (success) { - // Get the fix string from the fixed format specifier. - SmallString<128> buf; - llvm::raw_svector_ostream os(buf); - fixedFS.toString(os); - EmitFormatDiagnostic( - S.PDiag(diag::warn_format_conversion_argument_type_mismatch) - << AT.getRepresentativeTypeName(S.Context) << Ex->getType() << false - << Ex->getSourceRange(), + if (!AT.isValid()) { + return true; + } + + analyze_format_string::ArgType::MatchKind match = + AT.matchesType(S.Context, Ex->getType()); + if (match == analyze_format_string::ArgType::Match) { + return true; + } + + ScanfSpecifier fixedFS = FS; + bool success = fixedFS.fixType(Ex->getType(), Ex->IgnoreImpCasts()->getType(), + S.getLangOpts(), S.Context); + + unsigned diag = diag::warn_format_conversion_argument_type_mismatch; + if (match == analyze_format_string::ArgType::NoMatchPedantic) { + diag = diag::warn_format_conversion_argument_type_mismatch_pedantic; + } + + if (success) { + // Get the fix string from the fixed format specifier. + SmallString<128> buf; + llvm::raw_svector_ostream os(buf); + fixedFS.toString(os); + + EmitFormatDiagnostic( + S.PDiag(diag) << AT.getRepresentativeTypeName(S.Context) + << Ex->getType() << false << Ex->getSourceRange(), Ex->getLocStart(), - /*IsStringLocation*/false, + /*IsStringLocation*/ false, getSpecifierRange(startSpecifier, specifierLen), FixItHint::CreateReplacement( - getSpecifierRange(startSpecifier, specifierLen), - os.str())); - } else { - EmitFormatDiagnostic( - S.PDiag(diag::warn_format_conversion_argument_type_mismatch) - << AT.getRepresentativeTypeName(S.Context) << Ex->getType() << false - << Ex->getSourceRange(), - Ex->getLocStart(), - /*IsStringLocation*/false, - getSpecifierRange(startSpecifier, specifierLen)); - } + getSpecifierRange(startSpecifier, specifierLen), os.str())); + } else { + EmitFormatDiagnostic(S.PDiag(diag) + << AT.getRepresentativeTypeName(S.Context) + << Ex->getType() << false << Ex->getSourceRange(), + Ex->getLocStart(), + /*IsStringLocation*/ false, + getSpecifierRange(startSpecifier, specifierLen)); } return true; @@ -4046,9 +4278,9 @@ void Sema::CheckFormatString(const StringLiteral *FExpr, } if (Type == FST_Printf || Type == FST_NSString || - Type == FST_FreeBSDKPrintf) { + Type == FST_FreeBSDKPrintf || Type == FST_OSTrace) { CheckPrintfHandler H(*this, FExpr, OrigFormatExpr, firstDataArg, - numDataArgs, (Type == FST_NSString), + numDataArgs, (Type == FST_NSString || Type == FST_OSTrace), Str, HasVAListArg, Args, format_idx, inFunctionCall, CallType, CheckedVarArgs); @@ -4533,7 +4765,7 @@ static const CXXRecordDecl *getContainedDynamicClass(QualType T, /// \brief If E is a sizeof expression, returns its argument expression, /// otherwise returns NULL. 
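
The CheckMemaccessArguments changes below extend the destination check to array types while keeping the long-standing rule that raw memory routines must not wipe objects with vtables. The canonical diagnosed case, and the documented way to silence it (the type and function names are illustrative):

    #include <cstring>

    struct Widget { virtual ~Widget(); int State; };

    void reset(Widget &W) {
      std::memset(&W, 0, sizeof(Widget));          // warn_dyn_class_memaccess:
                                                   // clobbers the vptr
      std::memset((void *)&W, 0, sizeof(Widget));  // (void*) cast silences it,
                                                   // per note_bad_memaccess_silence
    }
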
-static const Expr *getSizeOfExprArg(const Expr* E) { +static const Expr *getSizeOfExprArg(const Expr *E) { if (const UnaryExprOrTypeTraitExpr *SizeOf = dyn_cast<UnaryExprOrTypeTraitExpr>(E)) if (SizeOf->getKind() == clang::UETT_SizeOf && !SizeOf->isArgumentType()) @@ -4543,7 +4775,7 @@ static const Expr *getSizeOfExprArg(const Expr* E) { } /// \brief If E is a sizeof expression, returns its argument type. -static QualType getSizeOfArgType(const Expr* E) { +static QualType getSizeOfArgType(const Expr *E) { if (const UnaryExprOrTypeTraitExpr *SizeOf = dyn_cast<UnaryExprOrTypeTraitExpr>(E)) if (SizeOf->getKind() == clang::UETT_SizeOf) @@ -4589,8 +4821,9 @@ void Sema::CheckMemaccessArguments(const CallExpr *Call, SourceRange ArgRange = Call->getArg(ArgIdx)->getSourceRange(); QualType DestTy = Dest->getType(); + QualType PointeeTy; if (const PointerType *DestPtrTy = DestTy->getAs<PointerType>()) { - QualType PointeeTy = DestPtrTy->getPointeeType(); + PointeeTy = DestPtrTy->getPointeeType(); // Never warn about void type pointers. This can be used to suppress // false positives. @@ -4670,47 +4903,53 @@ void Sema::CheckMemaccessArguments(const CallExpr *Call, break; } } + } else if (DestTy->isArrayType()) { + PointeeTy = DestTy; + } - // Always complain about dynamic classes. - bool IsContained; - if (const CXXRecordDecl *ContainedRD = - getContainedDynamicClass(PointeeTy, IsContained)) { - - unsigned OperationType = 0; - // "overwritten" if we're warning about the destination for any call - // but memcmp; otherwise a verb appropriate to the call. - if (ArgIdx != 0 || BId == Builtin::BImemcmp) { - if (BId == Builtin::BImemcpy) - OperationType = 1; - else if(BId == Builtin::BImemmove) - OperationType = 2; - else if (BId == Builtin::BImemcmp) - OperationType = 3; - } - - DiagRuntimeBehavior( - Dest->getExprLoc(), Dest, - PDiag(diag::warn_dyn_class_memaccess) - << (BId == Builtin::BImemcmp ? ArgIdx + 2 : ArgIdx) - << FnName << IsContained << ContainedRD << OperationType - << Call->getCallee()->getSourceRange()); - } else if (PointeeTy.hasNonTrivialObjCLifetime() && - BId != Builtin::BImemset) - DiagRuntimeBehavior( - Dest->getExprLoc(), Dest, - PDiag(diag::warn_arc_object_memaccess) - << ArgIdx << FnName << PointeeTy - << Call->getCallee()->getSourceRange()); - else - continue; + if (PointeeTy == QualType()) + continue; + // Always complain about dynamic classes. + bool IsContained; + if (const CXXRecordDecl *ContainedRD = + getContainedDynamicClass(PointeeTy, IsContained)) { + + unsigned OperationType = 0; + // "overwritten" if we're warning about the destination for any call + // but memcmp; otherwise a verb appropriate to the call. + if (ArgIdx != 0 || BId == Builtin::BImemcmp) { + if (BId == Builtin::BImemcpy) + OperationType = 1; + else if(BId == Builtin::BImemmove) + OperationType = 2; + else if (BId == Builtin::BImemcmp) + OperationType = 3; + } + DiagRuntimeBehavior( Dest->getExprLoc(), Dest, - PDiag(diag::note_bad_memaccess_silence) - << FixItHint::CreateInsertion(ArgRange.getBegin(), "(void*)")); - break; - } + PDiag(diag::warn_dyn_class_memaccess) + << (BId == Builtin::BImemcmp ? 
ArgIdx + 2 : ArgIdx) + << FnName << IsContained << ContainedRD << OperationType + << Call->getCallee()->getSourceRange()); + } else if (PointeeTy.hasNonTrivialObjCLifetime() && + BId != Builtin::BImemset) + DiagRuntimeBehavior( + Dest->getExprLoc(), Dest, + PDiag(diag::warn_arc_object_memaccess) + << ArgIdx << FnName << PointeeTy + << Call->getCallee()->getSourceRange()); + else + continue; + + DiagRuntimeBehavior( + Dest->getExprLoc(), Dest, + PDiag(diag::note_bad_memaccess_silence) + << FixItHint::CreateInsertion(ArgRange.getBegin(), "(void*)")); + break; } + } // A little helper routine: ignore addition and subtraction of integer literals. @@ -5861,7 +6100,7 @@ static void DiagnoseOutOfRangeComparison(Sema &S, BinaryOperator *E, // TODO: Investigate using GetExprRange() to get tighter bounds // on the bit ranges. QualType OtherT = Other->getType(); - if (const AtomicType *AT = dyn_cast<AtomicType>(OtherT)) + if (const auto *AT = OtherT->getAs<AtomicType>()) OtherT = AT->getValueType(); IntRange OtherRange = IntRange::forValueOfType(S.Context, OtherT); unsigned OtherWidth = OtherRange.Width; @@ -6705,8 +6944,11 @@ void AnalyzeImplicitConversions(Sema &S, Expr *OrigE, SourceLocation CC) { E = POE->getResultExpr(); } - if (const OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(E)) - return AnalyzeImplicitConversions(S, OVE->getSourceExpr(), CC); + if (const OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(E)) { + if (OVE->getSourceExpr()) + AnalyzeImplicitConversions(S, OVE->getSourceExpr(), CC); + return; + } // Skip past explicit casts. if (isa<ExplicitCastExpr>(E)) { @@ -6788,7 +7030,7 @@ static bool CheckForReference(Sema &SemaRef, const Expr *E, if (!M->getMemberDecl()->getType()->isReferenceType()) return false; } else if (const CallExpr *Call = dyn_cast<CallExpr>(E)) { - if (!Call->getCallReturnType()->isReferenceType()) + if (!Call->getCallReturnType(SemaRef.Context)->isReferenceType()) return false; FD = Call->getDirectCallee(); } else { @@ -7533,6 +7775,35 @@ void Sema::CheckBitFieldInitialization(SourceLocation InitLoc, (void) AnalyzeBitFieldAssignment(*this, BitField, Init, InitLoc); } +static void diagnoseArrayStarInParamType(Sema &S, QualType PType, + SourceLocation Loc) { + if (!PType->isVariablyModifiedType()) + return; + if (const auto *PointerTy = dyn_cast<PointerType>(PType)) { + diagnoseArrayStarInParamType(S, PointerTy->getPointeeType(), Loc); + return; + } + if (const auto *ReferenceTy = dyn_cast<ReferenceType>(PType)) { + diagnoseArrayStarInParamType(S, ReferenceTy->getPointeeType(), Loc); + return; + } + if (const auto *ParenTy = dyn_cast<ParenType>(PType)) { + diagnoseArrayStarInParamType(S, ParenTy->getInnerType(), Loc); + return; + } + + const ArrayType *AT = S.Context.getAsArrayType(PType); + if (!AT) + return; + + if (AT->getSizeModifier() != ArrayType::Star) { + diagnoseArrayStarInParamType(S, AT->getElementType(), Loc); + return; + } + + S.Diag(Loc, diag::err_array_star_in_function_definition); +} + /// CheckParmsForFunctionDef - Check that the parameters of the given /// function are appropriate for the definition of a function. This /// takes care of any checks that cannot be performed on the @@ -7571,15 +7842,9 @@ bool Sema::CheckParmsForFunctionDef(ParmVarDecl *const *P, // notation in their sequences of declarator specifiers to specify // variable length array types. 
QualType PType = Param->getOriginalType(); - while (const ArrayType *AT = Context.getAsArrayType(PType)) { - if (AT->getSizeModifier() == ArrayType::Star) { - // FIXME: This diagnostic should point the '[*]' if source-location - // information is added for it. - Diag(Param->getLocation(), diag::err_array_star_in_function_definition); - break; - } - PType= AT->getElementType(); - } + // FIXME: This diagnostic should point the '[*]' if source-location + // information is added for it. + diagnoseArrayStarInParamType(*this, PType, Param->getLocation()); // MSVC destroys objects passed by value in the callee. Therefore a // function definition which takes such a parameter must be able to call the @@ -8097,6 +8362,236 @@ static bool isSetterLikeSelector(Selector sel) { return !isLowercase(str.front()); } +static Optional<int> GetNSMutableArrayArgumentIndex(Sema &S, + ObjCMessageExpr *Message) { + if (S.NSMutableArrayPointer.isNull()) { + IdentifierInfo *NSMutableArrayId = + S.NSAPIObj->getNSClassId(NSAPI::ClassId_NSMutableArray); + NamedDecl *IF = S.LookupSingleName(S.TUScope, NSMutableArrayId, + Message->getLocStart(), + Sema::LookupOrdinaryName); + ObjCInterfaceDecl *InterfaceDecl = dyn_cast_or_null<ObjCInterfaceDecl>(IF); + if (!InterfaceDecl) { + return None; + } + QualType NSMutableArrayObject = + S.Context.getObjCInterfaceType(InterfaceDecl); + S.NSMutableArrayPointer = + S.Context.getObjCObjectPointerType(NSMutableArrayObject); + } + + if (S.NSMutableArrayPointer != Message->getReceiverType()) { + return None; + } + + Selector Sel = Message->getSelector(); + + Optional<NSAPI::NSArrayMethodKind> MKOpt = + S.NSAPIObj->getNSArrayMethodKind(Sel); + if (!MKOpt) { + return None; + } + + NSAPI::NSArrayMethodKind MK = *MKOpt; + + switch (MK) { + case NSAPI::NSMutableArr_addObject: + case NSAPI::NSMutableArr_insertObjectAtIndex: + case NSAPI::NSMutableArr_setObjectAtIndexedSubscript: + return 0; + case NSAPI::NSMutableArr_replaceObjectAtIndex: + return 1; + + default: + return None; + } + + return None; +} + +static +Optional<int> GetNSMutableDictionaryArgumentIndex(Sema &S, + ObjCMessageExpr *Message) { + + if (S.NSMutableDictionaryPointer.isNull()) { + IdentifierInfo *NSMutableDictionaryId = + S.NSAPIObj->getNSClassId(NSAPI::ClassId_NSMutableDictionary); + NamedDecl *IF = S.LookupSingleName(S.TUScope, NSMutableDictionaryId, + Message->getLocStart(), + Sema::LookupOrdinaryName); + ObjCInterfaceDecl *InterfaceDecl = dyn_cast_or_null<ObjCInterfaceDecl>(IF); + if (!InterfaceDecl) { + return None; + } + QualType NSMutableDictionaryObject = + S.Context.getObjCInterfaceType(InterfaceDecl); + S.NSMutableDictionaryPointer = + S.Context.getObjCObjectPointerType(NSMutableDictionaryObject); + } + + if (S.NSMutableDictionaryPointer != Message->getReceiverType()) { + return None; + } + + Selector Sel = Message->getSelector(); + + Optional<NSAPI::NSDictionaryMethodKind> MKOpt = + S.NSAPIObj->getNSDictionaryMethodKind(Sel); + if (!MKOpt) { + return None; + } + + NSAPI::NSDictionaryMethodKind MK = *MKOpt; + + switch (MK) { + case NSAPI::NSMutableDict_setObjectForKey: + case NSAPI::NSMutableDict_setValueForKey: + case NSAPI::NSMutableDict_setObjectForKeyedSubscript: + return 0; + + default: + return None; + } + + return None; +} + +static Optional<int> GetNSSetArgumentIndex(Sema &S, ObjCMessageExpr *Message) { + + ObjCInterfaceDecl *InterfaceDecl; + if (S.NSMutableSetPointer.isNull()) { + IdentifierInfo *NSMutableSetId = + S.NSAPIObj->getNSClassId(NSAPI::ClassId_NSMutableSet); + NamedDecl *IF = 
S.LookupSingleName(S.TUScope, NSMutableSetId, + Message->getLocStart(), + Sema::LookupOrdinaryName); + InterfaceDecl = dyn_cast_or_null<ObjCInterfaceDecl>(IF); + if (InterfaceDecl) { + QualType NSMutableSetObject = + S.Context.getObjCInterfaceType(InterfaceDecl); + S.NSMutableSetPointer = + S.Context.getObjCObjectPointerType(NSMutableSetObject); + } + } + + if (S.NSCountedSetPointer.isNull()) { + IdentifierInfo *NSCountedSetId = + S.NSAPIObj->getNSClassId(NSAPI::ClassId_NSCountedSet); + NamedDecl *IF = S.LookupSingleName(S.TUScope, NSCountedSetId, + Message->getLocStart(), + Sema::LookupOrdinaryName); + InterfaceDecl = dyn_cast_or_null<ObjCInterfaceDecl>(IF); + if (InterfaceDecl) { + QualType NSCountedSetObject = + S.Context.getObjCInterfaceType(InterfaceDecl); + S.NSCountedSetPointer = + S.Context.getObjCObjectPointerType(NSCountedSetObject); + } + } + + if (S.NSMutableOrderedSetPointer.isNull()) { + IdentifierInfo *NSOrderedSetId = + S.NSAPIObj->getNSClassId(NSAPI::ClassId_NSMutableOrderedSet); + NamedDecl *IF = S.LookupSingleName(S.TUScope, NSOrderedSetId, + Message->getLocStart(), + Sema::LookupOrdinaryName); + InterfaceDecl = dyn_cast_or_null<ObjCInterfaceDecl>(IF); + if (InterfaceDecl) { + QualType NSOrderedSetObject = + S.Context.getObjCInterfaceType(InterfaceDecl); + S.NSMutableOrderedSetPointer = + S.Context.getObjCObjectPointerType(NSOrderedSetObject); + } + } + + QualType ReceiverType = Message->getReceiverType(); + + bool IsMutableSet = !S.NSMutableSetPointer.isNull() && + ReceiverType == S.NSMutableSetPointer; + bool IsMutableOrderedSet = !S.NSMutableOrderedSetPointer.isNull() && + ReceiverType == S.NSMutableOrderedSetPointer; + bool IsCountedSet = !S.NSCountedSetPointer.isNull() && + ReceiverType == S.NSCountedSetPointer; + + if (!IsMutableSet && !IsMutableOrderedSet && !IsCountedSet) { + return None; + } + + Selector Sel = Message->getSelector(); + + Optional<NSAPI::NSSetMethodKind> MKOpt = S.NSAPIObj->getNSSetMethodKind(Sel); + if (!MKOpt) { + return None; + } + + NSAPI::NSSetMethodKind MK = *MKOpt; + + switch (MK) { + case NSAPI::NSMutableSet_addObject: + case NSAPI::NSOrderedSet_setObjectAtIndex: + case NSAPI::NSOrderedSet_setObjectAtIndexedSubscript: + case NSAPI::NSOrderedSet_insertObjectAtIndex: + return 0; + case NSAPI::NSOrderedSet_replaceObjectAtIndexWithObject: + return 1; + } + + return None; +} + +void Sema::CheckObjCCircularContainer(ObjCMessageExpr *Message) { + if (!Message->isInstanceMessage()) { + return; + } + + Optional<int> ArgOpt; + + if (!(ArgOpt = GetNSMutableArrayArgumentIndex(*this, Message)) && + !(ArgOpt = GetNSMutableDictionaryArgumentIndex(*this, Message)) && + !(ArgOpt = GetNSSetArgumentIndex(*this, Message))) { + return; + } + + int ArgIndex = *ArgOpt; + + Expr *Receiver = Message->getInstanceReceiver()->IgnoreImpCasts(); + if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Receiver)) { + Receiver = OE->getSourceExpr()->IgnoreImpCasts(); + } + + Expr *Arg = Message->getArg(ArgIndex)->IgnoreImpCasts(); + if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Arg)) { + Arg = OE->getSourceExpr()->IgnoreImpCasts(); + } + + if (DeclRefExpr *ReceiverRE = dyn_cast<DeclRefExpr>(Receiver)) { + if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) { + if (ReceiverRE->getDecl() == ArgRE->getDecl()) { + ValueDecl *Decl = ReceiverRE->getDecl(); + Diag(Message->getSourceRange().getBegin(), + diag::warn_objc_circular_container) + << Decl->getName(); + Diag(Decl->getLocation(), + diag::note_objc_circular_container_declared_here) + << Decl->getName(); + } + } + } 
else if (ObjCIvarRefExpr *IvarRE = dyn_cast<ObjCIvarRefExpr>(Receiver)) { + if (ObjCIvarRefExpr *IvarArgRE = dyn_cast<ObjCIvarRefExpr>(Arg)) { + if (IvarRE->getDecl() == IvarArgRE->getDecl()) { + ObjCIvarDecl *Decl = IvarRE->getDecl(); + Diag(Message->getSourceRange().getBegin(), + diag::warn_objc_circular_container) + << Decl->getName(); + Diag(Decl->getLocation(), + diag::note_objc_circular_container_declared_here) + << Decl->getName(); + } + } + } + +} + /// Check a message send to see if it's likely to cause a retain cycle. void Sema::checkRetainCycles(ObjCMessageExpr *msg) { // Only check instance methods whose selector looks like a setter. @@ -8281,7 +8776,7 @@ bool ShouldDiagnoseEmptyStmtBody(const SourceManager &SourceMgr, // Get line numbers of statement and body. bool StmtLineInvalid; - unsigned StmtLine = SourceMgr.getSpellingLineNumber(StmtLoc, + unsigned StmtLine = SourceMgr.getPresumedLineNumber(StmtLoc, &StmtLineInvalid); if (StmtLineInvalid) return false; diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaCodeComplete.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaCodeComplete.cpp index 48bdd2a..18d352b 100644 --- a/contrib/llvm/tools/clang/lib/Sema/SemaCodeComplete.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/SemaCodeComplete.cpp @@ -495,7 +495,6 @@ bool ResultBuilder::isInterestingDecl(const NamedDecl *ND, AsNestedNameSpecifier = false; ND = ND->getUnderlyingDecl(); - unsigned IDNS = ND->getIdentifierNamespace(); // Skip unnamed entities. if (!ND->getDeclName()) @@ -503,7 +502,7 @@ bool ResultBuilder::isInterestingDecl(const NamedDecl *ND, // Friend declarations and declarations introduced due to friends are never // added as results. - if (IDNS & (Decl::IDNS_OrdinaryFriend | Decl::IDNS_TagFriend)) + if (ND->getFriendObjectKind() == Decl::FOK_Undeclared) return false; // Class template (partial) specializations are never added as results. 
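The CheckMemaccessArguments change earlier in this diff extends the dynamic-class check from pointer destinations to destinations that still have array type: the checked argument has parentheses and implicit casts stripped, so an array whose decay cast is undone now reaches the same PointeeTy logic through the new DestTy->isArrayType() branch. A minimal sketch of user code the revised check would flag (the struct and function names are illustrative, not from the patch):

struct S {
  virtual void f(); // a virtual member makes S a dynamic class
};

void zero() {
  S arr[4];
  // Once implicit casts are stripped, the destination keeps its array type,
  // so the new DestTy->isArrayType() branch routes it into the dynamic-class
  // check and this call is diagnosed (warn_dyn_class_memaccess): it would
  // overwrite the objects' vtable pointers.
  __builtin_memset(arr, 0, sizeof(arr));
}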
@@ -800,8 +799,8 @@ void ResultBuilder::MaybeAddConstructorResults(Result R) { DeclarationName ConstructorName = Context.DeclarationNames.getCXXConstructorName( Context.getCanonicalType(RecordTy)); - DeclContext::lookup_const_result Ctors = Record->lookup(ConstructorName); - for (DeclContext::lookup_const_iterator I = Ctors.begin(), + DeclContext::lookup_result Ctors = Record->lookup(ConstructorName); + for (DeclContext::lookup_iterator I = Ctors.begin(), E = Ctors.end(); I != E; ++I) { R.Declaration = *I; @@ -2018,7 +2017,7 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC, if (SemaRef.getLangOpts().C11) { // _Alignof Builder.AddResultTypeChunk("size_t"); - if (SemaRef.getASTContext().Idents.get("alignof").hasMacroDefinition()) + if (SemaRef.PP.isMacroDefined("alignof")) Builder.AddTypedTextChunk("alignof"); else Builder.AddTypedTextChunk("_Alignof"); @@ -2086,15 +2085,14 @@ static void AddResultTypeChunk(ASTContext &Context, Result.getAllocator())); } -static void MaybeAddSentinel(ASTContext &Context, +static void MaybeAddSentinel(Preprocessor &PP, const NamedDecl *FunctionOrMethod, CodeCompletionBuilder &Result) { if (SentinelAttr *Sentinel = FunctionOrMethod->getAttr<SentinelAttr>()) if (Sentinel->getSentinel() == 0) { - if (Context.getLangOpts().ObjC1 && - Context.Idents.get("nil").hasMacroDefinition()) + if (PP.getLangOpts().ObjC1 && PP.isMacroDefined("nil")) Result.AddTextChunk(", nil"); - else if (Context.Idents.get("NULL").hasMacroDefinition()) + else if (PP.isMacroDefined("NULL")) Result.AddTextChunk(", NULL"); else Result.AddTextChunk(", (void*)0"); @@ -2118,8 +2116,7 @@ static std::string formatObjCParamQualifiers(unsigned ObjCQuals) { return Result; } -static std::string FormatFunctionParameter(ASTContext &Context, - const PrintingPolicy &Policy, +static std::string FormatFunctionParameter(const PrintingPolicy &Policy, const ParmVarDecl *Param, bool SuppressName = false, bool SuppressBlock = false) { @@ -2218,7 +2215,7 @@ static std::string FormatFunctionParameter(ASTContext &Context, for (unsigned I = 0, N = Block.getNumParams(); I != N; ++I) { if (I) Params += ", "; - Params += FormatFunctionParameter(Context, Policy, Block.getParam(I), + Params += FormatFunctionParameter(Policy, Block.getParam(I), /*SuppressName=*/false, /*SuppressBlock=*/true); @@ -2248,7 +2245,7 @@ static std::string FormatFunctionParameter(ASTContext &Context, } /// \brief Add function parameter chunks to the given code completion string. -static void AddFunctionParameterChunks(ASTContext &Context, +static void AddFunctionParameterChunks(Preprocessor &PP, const PrintingPolicy &Policy, const FunctionDecl *Function, CodeCompletionBuilder &Result, @@ -2266,7 +2263,7 @@ static void AddFunctionParameterChunks(ASTContext &Context, Result.getCodeCompletionTUInfo()); if (!FirstParameter) Opt.AddChunk(CodeCompletionString::CK_Comma); - AddFunctionParameterChunks(Context, Policy, Function, Opt, P, true); + AddFunctionParameterChunks(PP, Policy, Function, Opt, P, true); Result.AddOptionalChunk(Opt.TakeString()); break; } @@ -2279,9 +2276,8 @@ static void AddFunctionParameterChunks(ASTContext &Context, InOptional = false; // Format the placeholder string. 
- std::string PlaceholderStr = FormatFunctionParameter(Context, Policy, - Param); - + std::string PlaceholderStr = FormatFunctionParameter(Policy, Param); + if (Function->isVariadic() && P == N - 1) PlaceholderStr += ", ..."; @@ -2296,7 +2292,7 @@ static void AddFunctionParameterChunks(ASTContext &Context, if (Proto->getNumParams() == 0) Result.AddPlaceholderChunk("..."); - MaybeAddSentinel(Context, Function, Result); + MaybeAddSentinel(PP, Function, Result); } } @@ -2309,7 +2305,11 @@ static void AddTemplateParameterChunks(ASTContext &Context, unsigned Start = 0, bool InDefaultArg = false) { bool FirstParameter = true; - + + // Prefer to take the template parameter names from the first declaration of + // the template. + Template = cast<TemplateDecl>(Template->getCanonicalDecl()); + TemplateParameterList *Params = Template->getTemplateParameters(); TemplateParameterList::iterator PEnd = Params->end(); if (MaxParameters) @@ -2572,11 +2572,7 @@ CodeCompletionResult::CreateCodeCompletionString(ASTContext &Ctx, } if (Kind == RK_Macro) { - const MacroDirective *MD = PP.getMacroDirectiveHistory(Macro); - assert(MD && "Not a macro?"); - const MacroInfo *MI = MD->getMacroInfo(); - assert((!MD->isDefined() || MI) && "missing MacroInfo for define"); - + const MacroInfo *MI = PP.getMacroInfo(Macro); Result.AddTypedTextChunk( Result.getAllocator().CopyString(Macro->getName())); @@ -2651,7 +2647,7 @@ CodeCompletionResult::CreateCodeCompletionString(ASTContext &Ctx, Ctx, Policy); AddTypedNameChunk(Ctx, Policy, ND, Result); Result.AddChunk(CodeCompletionString::CK_LeftParen); - AddFunctionParameterChunks(Ctx, Policy, Function, Result); + AddFunctionParameterChunks(PP, Policy, Function, Result); Result.AddChunk(CodeCompletionString::CK_RightParen); AddFunctionTypeQualsToCompletionString(Result, Function); return Result.TakeString(); @@ -2705,7 +2701,7 @@ CodeCompletionResult::CreateCodeCompletionString(ASTContext &Ctx, // Add the function parameters Result.AddChunk(CodeCompletionString::CK_LeftParen); - AddFunctionParameterChunks(Ctx, Policy, Function, Result); + AddFunctionParameterChunks(PP, Policy, Function, Result); Result.AddChunk(CodeCompletionString::CK_RightParen); AddFunctionTypeQualsToCompletionString(Result, Function); return Result.TakeString(); @@ -2766,7 +2762,7 @@ CodeCompletionResult::CreateCodeCompletionString(ASTContext &Ctx, std::string Arg; if ((*P)->getType()->isBlockPointerType() && !DeclaringEntity) - Arg = FormatFunctionParameter(Ctx, Policy, *P, true); + Arg = FormatFunctionParameter(Policy, *P, true); else { (*P)->getType().getAsStringInternal(Arg, Policy); Arg = "(" + formatObjCParamQualifiers((*P)->getObjCDeclQualifier()) @@ -2797,7 +2793,7 @@ CodeCompletionResult::CreateCodeCompletionString(ASTContext &Ctx, Result.AddPlaceholderChunk(", ..."); } - MaybeAddSentinel(Ctx, Method, Result); + MaybeAddSentinel(PP, Method, Result); } return Result.TakeString(); @@ -2812,73 +2808,115 @@ CodeCompletionResult::CreateCodeCompletionString(ASTContext &Ctx, return Result.TakeString(); } +/// \brief Add function overload parameter chunks to the given code completion +/// string. +static void AddOverloadParameterChunks(ASTContext &Context, + const PrintingPolicy &Policy, + const FunctionDecl *Function, + const FunctionProtoType *Prototype, + CodeCompletionBuilder &Result, + unsigned CurrentArg, + unsigned Start = 0, + bool InOptional = false) { + bool FirstParameter = true; + unsigned NumParams = Function ? 
Function->getNumParams() + : Prototype->getNumParams(); + + for (unsigned P = Start; P != NumParams; ++P) { + if (Function && Function->getParamDecl(P)->hasDefaultArg() && !InOptional) { + // When we see an optional default argument, put that argument and + // the remaining default arguments into a new, optional string. + CodeCompletionBuilder Opt(Result.getAllocator(), + Result.getCodeCompletionTUInfo()); + if (!FirstParameter) + Opt.AddChunk(CodeCompletionString::CK_Comma); + // Optional sections are nested. + AddOverloadParameterChunks(Context, Policy, Function, Prototype, Opt, + CurrentArg, P, /*InOptional=*/true); + Result.AddOptionalChunk(Opt.TakeString()); + return; + } + + if (FirstParameter) + FirstParameter = false; + else + Result.AddChunk(CodeCompletionString::CK_Comma); + + InOptional = false; + + // Format the placeholder string. + std::string Placeholder; + if (Function) + Placeholder = FormatFunctionParameter(Policy, Function->getParamDecl(P)); + else + Placeholder = Prototype->getParamType(P).getAsString(Policy); + + if (P == CurrentArg) + Result.AddCurrentParameterChunk( + Result.getAllocator().CopyString(Placeholder)); + else + Result.AddPlaceholderChunk(Result.getAllocator().CopyString(Placeholder)); + } + + if (Prototype && Prototype->isVariadic()) { + CodeCompletionBuilder Opt(Result.getAllocator(), + Result.getCodeCompletionTUInfo()); + if (!FirstParameter) + Opt.AddChunk(CodeCompletionString::CK_Comma); + + if (CurrentArg < NumParams) + Opt.AddPlaceholderChunk("..."); + else + Opt.AddCurrentParameterChunk("..."); + + Result.AddOptionalChunk(Opt.TakeString()); + } +} + CodeCompletionString * CodeCompleteConsumer::OverloadCandidate::CreateSignatureString( - unsigned CurrentArg, - Sema &S, - CodeCompletionAllocator &Allocator, - CodeCompletionTUInfo &CCTUInfo) const { + unsigned CurrentArg, Sema &S, + CodeCompletionAllocator &Allocator, + CodeCompletionTUInfo &CCTUInfo, + bool IncludeBriefComments) const { PrintingPolicy Policy = getCompletionPrintingPolicy(S); // FIXME: Set priority, availability appropriately. CodeCompletionBuilder Result(Allocator,CCTUInfo, 1, CXAvailability_Available); FunctionDecl *FDecl = getFunction(); - AddResultTypeChunk(S.Context, Policy, FDecl, Result); - const FunctionProtoType *Proto + const FunctionProtoType *Proto = dyn_cast<FunctionProtoType>(getFunctionType()); if (!FDecl && !Proto) { // Function without a prototype. Just give the return type and a // highlighted ellipsis. 
const FunctionType *FT = getFunctionType(); - Result.AddTextChunk(GetCompletionTypeString(FT->getReturnType(), S.Context, - Policy, Result.getAllocator())); + Result.AddResultTypeChunk(Result.getAllocator().CopyString( + FT->getReturnType().getAsString(Policy))); Result.AddChunk(CodeCompletionString::CK_LeftParen); Result.AddChunk(CodeCompletionString::CK_CurrentParameter, "..."); Result.AddChunk(CodeCompletionString::CK_RightParen); return Result.TakeString(); } - - if (FDecl) + + if (FDecl) { + if (IncludeBriefComments && CurrentArg < FDecl->getNumParams()) + if (auto RC = S.getASTContext().getRawCommentForAnyRedecl( + FDecl->getParamDecl(CurrentArg))) + Result.addBriefComment(RC->getBriefText(S.getASTContext())); + AddResultTypeChunk(S.Context, Policy, FDecl, Result); Result.AddTextChunk( - Result.getAllocator().CopyString(FDecl->getNameAsString())); - else - Result.AddTextChunk(Result.getAllocator().CopyString( + Result.getAllocator().CopyString(FDecl->getNameAsString())); + } else { + Result.AddResultTypeChunk( + Result.getAllocator().CopyString( Proto->getReturnType().getAsString(Policy))); + } Result.AddChunk(CodeCompletionString::CK_LeftParen); - unsigned NumParams = FDecl ? FDecl->getNumParams() : Proto->getNumParams(); - for (unsigned I = 0; I != NumParams; ++I) { - if (I) - Result.AddChunk(CodeCompletionString::CK_Comma); - - std::string ArgString; - QualType ArgType; - - if (FDecl) { - ArgString = FDecl->getParamDecl(I)->getNameAsString(); - ArgType = FDecl->getParamDecl(I)->getOriginalType(); - } else { - ArgType = Proto->getParamType(I); - } - - ArgType.getAsStringInternal(ArgString, Policy); - - if (I == CurrentArg) - Result.AddChunk(CodeCompletionString::CK_CurrentParameter, - Result.getAllocator().CopyString(ArgString)); - else - Result.AddTextChunk(Result.getAllocator().CopyString(ArgString)); - } - - if (Proto && Proto->isVariadic()) { - Result.AddChunk(CodeCompletionString::CK_Comma); - if (CurrentArg < NumParams) - Result.AddTextChunk("..."); - else - Result.AddChunk(CodeCompletionString::CK_CurrentParameter, "..."); - } + AddOverloadParameterChunks(S.getASTContext(), Policy, FDecl, Proto, Result, + CurrentArg); Result.AddChunk(CodeCompletionString::CK_RightParen); - + return Result.TakeString(); } @@ -2990,8 +3028,9 @@ static void AddMacroResults(Preprocessor &PP, ResultBuilder &Results, for (Preprocessor::macro_iterator M = PP.macro_begin(), MEnd = PP.macro_end(); M != MEnd; ++M) { - if (IncludeUndefined || M->first->hasMacroDefinition()) { - if (MacroInfo *MI = M->second->getMacroInfo()) + auto MD = PP.getMacroDefinition(M->first); + if (IncludeUndefined || MD) { + if (MacroInfo *MI = MD.getMacroInfo()) if (MI->isUsedForHeaderGuard()) continue; @@ -3816,7 +3855,74 @@ static bool anyNullArguments(ArrayRef<Expr *> Args) { return false; } -void Sema::CodeCompleteCall(Scope *S, Expr *FnIn, ArrayRef<Expr *> Args) { +typedef CodeCompleteConsumer::OverloadCandidate ResultCandidate; + +static void mergeCandidatesWithResults(Sema &SemaRef, + SmallVectorImpl<ResultCandidate> &Results, + OverloadCandidateSet &CandidateSet, + SourceLocation Loc) { + if (!CandidateSet.empty()) { + // Sort the overload candidate set by placing the best overloads first. + std::stable_sort( + CandidateSet.begin(), CandidateSet.end(), + [&](const OverloadCandidate &X, const OverloadCandidate &Y) { + return isBetterOverloadCandidate(SemaRef, X, Y, Loc); + }); + + // Add the remaining viable overload candidates as code-completion results. 
+ for (auto &Candidate : CandidateSet) + if (Candidate.Viable) + Results.push_back(ResultCandidate(Candidate.Function)); + } +} + +/// \brief Get the type of the Nth parameter from a given set of overload +/// candidates. +static QualType getParamType(Sema &SemaRef, + ArrayRef<ResultCandidate> Candidates, + unsigned N) { + + // Given the overloads 'Candidates' for a function call matching all arguments + // up to N, return the type of the Nth parameter if it is the same for all + // overload candidates. + QualType ParamType; + for (auto &Candidate : Candidates) { + if (auto FType = Candidate.getFunctionType()) + if (auto Proto = dyn_cast<FunctionProtoType>(FType)) + if (N < Proto->getNumParams()) { + if (ParamType.isNull()) + ParamType = Proto->getParamType(N); + else if (!SemaRef.Context.hasSameUnqualifiedType( + ParamType.getNonReferenceType(), + Proto->getParamType(N).getNonReferenceType())) + // Otherwise return a default-constructed QualType. + return QualType(); + } + } + + return ParamType; +} + +static void CodeCompleteOverloadResults(Sema &SemaRef, Scope *S, + MutableArrayRef<ResultCandidate> Candidates, + unsigned CurrentArg, + bool CompleteExpressionWithCurrentArg = true) { + QualType ParamType; + if (CompleteExpressionWithCurrentArg) + ParamType = getParamType(SemaRef, Candidates, CurrentArg); + + if (ParamType.isNull()) + SemaRef.CodeCompleteOrdinaryName(S, Sema::PCC_Expression); + else + SemaRef.CodeCompleteExpression(S, ParamType); + + if (!Candidates.empty()) + SemaRef.CodeCompleter->ProcessOverloadCandidates(SemaRef, CurrentArg, + Candidates.data(), + Candidates.size()); +} + +void Sema::CodeCompleteCall(Scope *S, Expr *Fn, ArrayRef<Expr *> Args) { if (!CodeCompleter) return; @@ -3825,7 +3931,7 @@ void Sema::CodeCompleteCall(Scope *S, Expr *FnIn, ArrayRef<Expr *> Args) { // results. We may want to revisit this strategy in the future, // e.g., by merging the two kinds of results. - Expr *Fn = (Expr *)FnIn; + // FIXME: Provide support for variadic template functions. // Ignore type-dependent call expressions entirely. if (!Fn || Fn->isTypeDependent() || anyNullArguments(Args) || @@ -3838,92 +3944,120 @@ void Sema::CodeCompleteCall(Scope *S, Expr *FnIn, ArrayRef<Expr *> Args) { SourceLocation Loc = Fn->getExprLoc(); OverloadCandidateSet CandidateSet(Loc, OverloadCandidateSet::CSK_Normal); - // FIXME: What if we're calling something that isn't a function declaration? - // FIXME: What if we're calling a pseudo-destructor? - // FIXME: What if we're calling a member function? 
- - typedef CodeCompleteConsumer::OverloadCandidate ResultCandidate; SmallVector<ResultCandidate, 8> Results; Expr *NakedFn = Fn->IgnoreParenCasts(); - if (UnresolvedLookupExpr *ULE = dyn_cast<UnresolvedLookupExpr>(NakedFn)) + if (auto ULE = dyn_cast<UnresolvedLookupExpr>(NakedFn)) AddOverloadedCallCandidates(ULE, Args, CandidateSet, - /*PartialOverloading=*/ true); - else if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(NakedFn)) { - FunctionDecl *FDecl = dyn_cast<FunctionDecl>(DRE->getDecl()); - if (FDecl) { - if (!getLangOpts().CPlusPlus || - !FDecl->getType()->getAs<FunctionProtoType>()) - Results.push_back(ResultCandidate(FDecl)); + /*PartialOverloading=*/true); + else if (auto UME = dyn_cast<UnresolvedMemberExpr>(NakedFn)) { + TemplateArgumentListInfo TemplateArgsBuffer, *TemplateArgs = nullptr; + if (UME->hasExplicitTemplateArgs()) { + UME->copyTemplateArgumentsInto(TemplateArgsBuffer); + TemplateArgs = &TemplateArgsBuffer; + } + SmallVector<Expr *, 12> ArgExprs(1, UME->getBase()); + ArgExprs.append(Args.begin(), Args.end()); + UnresolvedSet<8> Decls; + Decls.append(UME->decls_begin(), UME->decls_end()); + AddFunctionCandidates(Decls, ArgExprs, CandidateSet, TemplateArgs, + /*SuppressUsedConversions=*/false, + /*PartialOverloading=*/true); + } else { + FunctionDecl *FD = nullptr; + if (auto MCE = dyn_cast<MemberExpr>(NakedFn)) + FD = dyn_cast<FunctionDecl>(MCE->getMemberDecl()); + else if (auto DRE = dyn_cast<DeclRefExpr>(NakedFn)) + FD = dyn_cast<FunctionDecl>(DRE->getDecl()); + if (FD) { // We check whether it's a resolved function declaration. + if (!getLangOpts().CPlusPlus || + !FD->getType()->getAs<FunctionProtoType>()) + Results.push_back(ResultCandidate(FD)); else - // FIXME: access? - AddOverloadCandidate(FDecl, DeclAccessPair::make(FDecl, AS_none), Args, - CandidateSet, false, /*PartialOverloading*/true); + AddOverloadCandidate(FD, DeclAccessPair::make(FD, FD->getAccess()), + Args, CandidateSet, + /*SuppressUsedConversions=*/false, + /*PartialOverloading=*/true); + + } else if (auto DC = NakedFn->getType()->getAsCXXRecordDecl()) { + // If the expression's type is a CXXRecordDecl, it may overload the function + // call operator, so we check whether it does and add those as candidates. + // A complete type is needed to look up member function call operators. + if (!RequireCompleteType(Loc, NakedFn->getType(), 0)) { + DeclarationName OpName = Context.DeclarationNames + .getCXXOperatorName(OO_Call); + LookupResult R(*this, OpName, Loc, LookupOrdinaryName); + LookupQualifiedName(R, DC); + R.suppressDiagnostics(); + SmallVector<Expr *, 12> ArgExprs(1, NakedFn); + ArgExprs.append(Args.begin(), Args.end()); + AddFunctionCandidates(R.asUnresolvedSet(), ArgExprs, CandidateSet, + /*ExplicitArgs=*/nullptr, + /*SuppressUsedConversions=*/false, + /*PartialOverloading=*/true); + } + } else { + // Lastly, we check whether the expression's type is a function pointer or + // a function. + QualType T = NakedFn->getType(); + if (!T->getPointeeType().isNull()) + T = T->getPointeeType(); + + if (auto FP = T->getAs<FunctionProtoType>()) { + if (!TooManyArguments(FP->getNumParams(), Args.size(), + /*PartialOverloading=*/true) || + FP->isVariadic()) + Results.push_back(ResultCandidate(FP)); + } else if (auto FT = T->getAs<FunctionType>()) + // No prototype and no declaration; it may be a K&R-style function. + Results.push_back(ResultCandidate(FT)); } } - - QualType ParamType; - - if (!CandidateSet.empty()) { - // Sort the overload candidate set by placing the best overloads first.
- std::stable_sort( - CandidateSet.begin(), CandidateSet.end(), - [&](const OverloadCandidate &X, const OverloadCandidate &Y) { - return isBetterOverloadCandidate(*this, X, Y, Loc); - }); - // Add the remaining viable overload candidates as code-completion reslults. - for (OverloadCandidateSet::iterator Cand = CandidateSet.begin(), - CandEnd = CandidateSet.end(); - Cand != CandEnd; ++Cand) { - if (Cand->Viable) - Results.push_back(ResultCandidate(Cand->Function)); - } - - // From the viable candidates, try to determine the type of this parameter. - for (unsigned I = 0, N = Results.size(); I != N; ++I) { - if (const FunctionType *FType = Results[I].getFunctionType()) - if (const FunctionProtoType *Proto = dyn_cast<FunctionProtoType>(FType)) - if (Args.size() < Proto->getNumParams()) { - if (ParamType.isNull()) - ParamType = Proto->getParamType(Args.size()); - else if (!Context.hasSameUnqualifiedType( - ParamType.getNonReferenceType(), - Proto->getParamType(Args.size()) - .getNonReferenceType())) { - ParamType = QualType(); - break; - } - } - } - } else { - // Try to determine the parameter type from the type of the expression - // being called. - QualType FunctionType = Fn->getType(); - if (const PointerType *Ptr = FunctionType->getAs<PointerType>()) - FunctionType = Ptr->getPointeeType(); - else if (const BlockPointerType *BlockPtr - = FunctionType->getAs<BlockPointerType>()) - FunctionType = BlockPtr->getPointeeType(); - else if (const MemberPointerType *MemPtr - = FunctionType->getAs<MemberPointerType>()) - FunctionType = MemPtr->getPointeeType(); - - if (const FunctionProtoType *Proto - = FunctionType->getAs<FunctionProtoType>()) { - if (Args.size() < Proto->getNumParams()) - ParamType = Proto->getParamType(Args.size()); + mergeCandidatesWithResults(*this, Results, CandidateSet, Loc); + CodeCompleteOverloadResults(*this, S, Results, Args.size(), + !CandidateSet.empty()); +} + +void Sema::CodeCompleteConstructor(Scope *S, QualType Type, SourceLocation Loc, + ArrayRef<Expr *> Args) { + if (!CodeCompleter) + return; + + // A complete type is needed to look up constructors. + if (RequireCompleteType(Loc, Type, 0)) + return; + + CXXRecordDecl *RD = Type->getAsCXXRecordDecl(); + if (!RD) { + CodeCompleteExpression(S, Type); + return; + } + + // FIXME: Provide support for member initializers. + // FIXME: Provide support for variadic template constructors.
+ + OverloadCandidateSet CandidateSet(Loc, OverloadCandidateSet::CSK_Normal); + + for (auto C : LookupConstructors(RD)) { + if (auto FD = dyn_cast<FunctionDecl>(C)) { + AddOverloadCandidate(FD, DeclAccessPair::make(FD, C->getAccess()), + Args, CandidateSet, + /*SuppressUsedConversions=*/false, + /*PartialOverloading=*/true); + } else if (auto FTD = dyn_cast<FunctionTemplateDecl>(C)) { + AddTemplateOverloadCandidate(FTD, + DeclAccessPair::make(FTD, C->getAccess()), + /*ExplicitTemplateArgs=*/nullptr, + Args, CandidateSet, + /*SuppressUsedConversions=*/false, + /*PartialOverloading=*/true); } } - if (ParamType.isNull()) - CodeCompleteOrdinaryName(S, PCC_Expression); - else - CodeCompleteExpression(S, ParamType); - - if (!Results.empty()) - CodeCompleter->ProcessOverloadCandidates(*this, Args.size(), Results.data(), - Results.size()); + SmallVector<ResultCandidate, 8> Results; + mergeCandidatesWithResults(*this, Results, CandidateSet, Loc); + CodeCompleteOverloadResults(*this, S, Results, Args.size()); } void Sema::CodeCompleteInitializer(Scope *S, Decl *D) { @@ -4981,7 +5115,7 @@ void Sema::CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS, // an action, e.g., // IBAction)<#selector#>:(id)sender if (DS.getObjCDeclQualifier() == 0 && !IsParameter && - Context.Idents.get("IBAction").hasMacroDefinition()) { + PP.isMacroDefined("IBAction")) { CodeCompletionBuilder Builder(Results.getAllocator(), Results.getCodeCompletionTUInfo(), CCP_CodePattern, CXAvailability_Available); diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaDecl.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaDecl.cpp index c6af2ed..89f4b3a 100644 --- a/contrib/llvm/tools/clang/lib/Sema/SemaDecl.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/SemaDecl.cpp @@ -128,45 +128,85 @@ bool Sema::isSimpleTypeSpecifier(tok::TokenKind Kind) const { return false; } +namespace { +enum class UnqualifiedTypeNameLookupResult { + NotFound, + FoundNonType, + FoundType +}; +} // namespace + +/// \brief Tries to perform unqualified lookup of the type decls in the bases +/// of a dependent class. +/// \return \a NotFound if no decls are found, \a FoundNonType if a non-type +/// decl is found, \a FoundType if only type decls are found. +static UnqualifiedTypeNameLookupResult +lookupUnqualifiedTypeNameInBase(Sema &S, const IdentifierInfo &II, + SourceLocation NameLoc, + const CXXRecordDecl *RD) { + if (!RD->hasDefinition()) + return UnqualifiedTypeNameLookupResult::NotFound; + // Look for type decls in base classes. + UnqualifiedTypeNameLookupResult FoundTypeDecl = + UnqualifiedTypeNameLookupResult::NotFound; + for (const auto &Base : RD->bases()) { + const CXXRecordDecl *BaseRD = nullptr; + if (auto *BaseTT = Base.getType()->getAs<TagType>()) + BaseRD = BaseTT->getAsCXXRecordDecl(); + else if (auto *TST = Base.getType()->getAs<TemplateSpecializationType>()) { + // Look for type decls in dependent base classes that have known primary + // templates.
+ if (!TST || !TST->isDependentType()) + continue; + auto *TD = TST->getTemplateName().getAsTemplateDecl(); + if (!TD) + continue; + auto *BasePrimaryTemplate = + dyn_cast_or_null<CXXRecordDecl>(TD->getTemplatedDecl()); + if (!BasePrimaryTemplate) + continue; + BaseRD = BasePrimaryTemplate; + } + if (BaseRD) { + for (NamedDecl *ND : BaseRD->lookup(&II)) { + if (!isa<TypeDecl>(ND)) + return UnqualifiedTypeNameLookupResult::FoundNonType; + FoundTypeDecl = UnqualifiedTypeNameLookupResult::FoundType; + } + if (FoundTypeDecl == UnqualifiedTypeNameLookupResult::NotFound) { + switch (lookupUnqualifiedTypeNameInBase(S, II, NameLoc, BaseRD)) { + case UnqualifiedTypeNameLookupResult::FoundNonType: + return UnqualifiedTypeNameLookupResult::FoundNonType; + case UnqualifiedTypeNameLookupResult::FoundType: + FoundTypeDecl = UnqualifiedTypeNameLookupResult::FoundType; + break; + case UnqualifiedTypeNameLookupResult::NotFound: + break; + } + } + } + } + + return FoundTypeDecl; +} + static ParsedType recoverFromTypeInKnownDependentBase(Sema &S, const IdentifierInfo &II, SourceLocation NameLoc) { - // Find the first parent class template context, if any. - // FIXME: Perform the lookup in all enclosing class templates. + // Lookup in the parent class template context, if any. const CXXRecordDecl *RD = nullptr; - for (DeclContext *DC = S.CurContext; DC; DC = DC->getParent()) { + UnqualifiedTypeNameLookupResult FoundTypeDecl = + UnqualifiedTypeNameLookupResult::NotFound; + for (DeclContext *DC = S.CurContext; + DC && FoundTypeDecl == UnqualifiedTypeNameLookupResult::NotFound; + DC = DC->getParent()) { + // Look for type decls in dependent base classes that have known primary + // templates. RD = dyn_cast<CXXRecordDecl>(DC); if (RD && RD->getDescribedClassTemplate()) - break; + FoundTypeDecl = lookupUnqualifiedTypeNameInBase(S, II, NameLoc, RD); } - if (!RD) - return ParsedType(); - - // Look for type decls in dependent base classes that have known primary - // templates. - bool FoundTypeDecl = false; - for (const auto &Base : RD->bases()) { - auto *TST = Base.getType()->getAs<TemplateSpecializationType>(); - if (!TST || !TST->isDependentType()) - continue; - auto *TD = TST->getTemplateName().getAsTemplateDecl(); - if (!TD) - continue; - auto *BasePrimaryTemplate = - dyn_cast_or_null<CXXRecordDecl>(TD->getTemplatedDecl()); - if (!BasePrimaryTemplate) - continue; - // FIXME: Allow lookup into non-dependent bases of dependent bases, possibly - // by calling or integrating with the main LookupQualifiedName mechanism. - for (NamedDecl *ND : BasePrimaryTemplate->lookup(&II)) { - if (FoundTypeDecl) - return ParsedType(); - FoundTypeDecl = isa<TypeDecl>(ND); - if (!FoundTypeDecl) - return ParsedType(); - } - } - if (!FoundTypeDecl) + if (FoundTypeDecl != UnqualifiedTypeNameLookupResult::FoundType) return ParsedType(); // We found some types in dependent base classes. Recover as if the user @@ -1708,7 +1748,7 @@ NamedDecl *Sema::LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, Loc, Loc, II, R, /*TInfo=*/nullptr, SC_Extern, false, - /*hasPrototype=*/true); + R->isFunctionProtoType()); New->setImplicit(); // Create Decl objects for each parameter, adding them to the @@ -1744,11 +1784,11 @@ NamedDecl *Sema::LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, /// should not consider because they are not permitted to conflict, e.g., /// because they come from hidden sub-modules and do not refer to the same /// entity. 
-static void filterNonConflictingPreviousDecls(ASTContext &context, +static void filterNonConflictingPreviousDecls(Sema &S, NamedDecl *decl, LookupResult &previous){ // This is only interesting when modules are enabled. - if (!context.getLangOpts().Modules) + if (!S.getLangOpts().Modules) return; // Empty sets are uninteresting. @@ -1760,7 +1800,7 @@ static void filterNonConflictingPreviousDecls(ASTContext &context, NamedDecl *old = filter.next(); // Non-hidden declarations are never ignored. - if (!old->isHidden()) + if (S.isVisible(old)) continue; if (!old->isExternallyVisible()) @@ -1774,11 +1814,11 @@ static void filterNonConflictingPreviousDecls(ASTContext &context, /// entity if their types are the same. /// FIXME: This is notionally doing the same thing as ASTReaderDecl's /// isSameEntity. -static void filterNonConflictingPreviousTypedefDecls(ASTContext &Context, +static void filterNonConflictingPreviousTypedefDecls(Sema &S, TypedefNameDecl *Decl, LookupResult &Previous) { // This is only interesting when modules are enabled. - if (!Context.getLangOpts().Modules) + if (!S.getLangOpts().Modules) return; // Empty sets are uninteresting. @@ -1790,16 +1830,23 @@ static void filterNonConflictingPreviousTypedefDecls(ASTContext &Context, NamedDecl *Old = Filter.next(); // Non-hidden declarations are never ignored. - if (!Old->isHidden()) + if (S.isVisible(Old)) continue; // Declarations of the same entity are not ignored, even if they have // different linkages. - if (auto *OldTD = dyn_cast<TypedefNameDecl>(Old)) - if (Context.hasSameType(OldTD->getUnderlyingType(), - Decl->getUnderlyingType())) + if (auto *OldTD = dyn_cast<TypedefNameDecl>(Old)) { + if (S.Context.hasSameType(OldTD->getUnderlyingType(), + Decl->getUnderlyingType())) continue; + // If both declarations give a tag declaration a typedef name for linkage + // purposes, then they declare the same entity. + if (OldTD->getAnonDeclWithTypedefName(/*AnyRedecl*/true) && + Decl->getAnonDeclWithTypedefName()) + continue; + } + if (!Old->isExternallyVisible()) Filter.erase(); } @@ -1909,6 +1956,27 @@ void Sema::MergeTypedefNameDecl(TypedefNameDecl *New, LookupResult &OldDecls) { if (Old->isInvalidDecl()) return New->setInvalidDecl(); + if (auto *OldTD = dyn_cast<TypedefNameDecl>(Old)) { + auto *OldTag = OldTD->getAnonDeclWithTypedefName(/*AnyRedecl*/true); + auto *NewTag = New->getAnonDeclWithTypedefName(); + NamedDecl *Hidden = nullptr; + if (getLangOpts().CPlusPlus && OldTag && NewTag && + OldTag->getCanonicalDecl() != NewTag->getCanonicalDecl() && + !hasVisibleDefinition(OldTag, &Hidden)) { + // There is a definition of this tag, but it is not visible. Use it + // instead of our tag. + New->setTypeForDecl(OldTD->getTypeForDecl()); + if (OldTD->isModed()) + New->setModedTypeSourceInfo(OldTD->getTypeSourceInfo(), + OldTD->getUnderlyingType()); + else + New->setTypeSourceInfo(OldTD->getTypeSourceInfo()); + + // Make the old tag definition visible. + makeMergedDefinitionVisible(Hidden, NewTag->getLocation()); + } + } + // If the typedef types are not identical, reject them in all languages and // with any extensions enabled. 
if (isIncompatibleTypedef(Old, New)) @@ -1978,7 +2046,6 @@ void Sema::MergeTypedefNameDecl(TypedefNameDecl *New, LookupResult &OldDecls) { Diag(New->getLocation(), diag::ext_redefinition_of_typedef) << New->getDeclName(); Diag(Old->getLocation(), diag::note_previous_definition); - return; } /// DeclhasAttr - returns true if decl Declaration already has the target @@ -2643,7 +2710,7 @@ bool Sema::MergeFunctionDecl(FunctionDecl *New, NamedDecl *&OldD, // UndefinedButUsed. if (!Old->isInlined() && New->isInlined() && !New->hasAttr<GNUInlineAttr>() && - (getLangOpts().CPlusPlus || !getLangOpts().GNUInline) && + !getLangOpts().GNUInline && Old->isUsed(false) && !Old->isDefined() && !New->isThisDeclarationADefinition()) UndefinedButUsed.insert(std::make_pair(Old->getCanonicalDecl(), @@ -2761,6 +2828,7 @@ bool Sema::MergeFunctionDecl(FunctionDecl *New, NamedDecl *&OldD, << New << New->getType(); } Diag(OldLocation, PrevDiag) << Old << Old->getType(); + return true; // Complain if this is an explicit declaration of a special // member that was initially declared implicitly. @@ -3336,14 +3404,23 @@ void Sema::MergeVarDecl(VarDecl *New, LookupResult &Previous) { } // C++ doesn't have tentative definitions, so go right ahead and check here. - const VarDecl *Def; + VarDecl *Def; if (getLangOpts().CPlusPlus && New->isThisDeclarationADefinition() == VarDecl::Definition && (Def = Old->getDefinition())) { - Diag(New->getLocation(), diag::err_redefinition) << New; - Diag(Def->getLocation(), diag::note_previous_definition); - New->setInvalidDecl(); - return; + NamedDecl *Hidden = nullptr; + if (!hasVisibleDefinition(Def, &Hidden) && + (New->getDescribedVarTemplate() || + New->getNumTemplateParameterLists() || + New->getDeclContext()->isDependentContext())) { + // The previous definition is hidden, and multiple definitions are + // permitted (in separate TUs). Form another definition of it. + } else { + Diag(New->getLocation(), diag::err_redefinition) << New; + Diag(Def->getLocation(), diag::note_previous_definition); + New->setInvalidDecl(); + return; + } } if (haveIncompatibleLanguageLinkages(Old, New)) { @@ -3375,8 +3452,20 @@ Decl *Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, return ParsedFreeStandingDeclSpec(S, AS, DS, MultiTemplateParamsArg()); } -static void HandleTagNumbering(Sema &S, const TagDecl *Tag, Scope *TagScope) { - if (!S.Context.getLangOpts().CPlusPlus) +// The MS ABI changed between VS2013 and VS2015 with regard to numbers used to +// disambiguate entities defined in different scopes. +// While the VS2015 ABI fixes potential miscompiles, it also breaks +// compatibility. +// We will pick our mangling number depending on which version of MSVC is being +// targeted. +static unsigned getMSManglingNumber(const LangOptions &LO, Scope *S) { + return LO.isCompatibleWithMSVC(LangOptions::MSVC2015) + ?
S->getMSCurManglingNumber() : S->getMSLastManglingNumber(); +} + +void Sema::handleTagNumbering(const TagDecl *Tag, Scope *TagScope) { + if (!Context.getLangOpts().CPlusPlus) return; if (isa<CXXRecordDecl>(Tag->getParent())) { @@ -3385,21 +3474,62 @@ static void HandleTagNumbering(Sema &S, const TagDecl *Tag, Scope *TagScope) { if (!Tag->getName().empty() || Tag->getTypedefNameForAnonDecl()) return; MangleNumberingContext &MCtx = - S.Context.getManglingNumberContext(Tag->getParent()); - S.Context.setManglingNumber( - Tag, MCtx.getManglingNumber(Tag, TagScope->getMSLocalManglingNumber())); + Context.getManglingNumberContext(Tag->getParent()); + Context.setManglingNumber( + Tag, MCtx.getManglingNumber( + Tag, getMSManglingNumber(getLangOpts(), TagScope))); return; } // If this tag isn't a direct child of a class, number it if it is local. Decl *ManglingContextDecl; - if (MangleNumberingContext *MCtx = - S.getCurrentMangleNumberContext(Tag->getDeclContext(), - ManglingContextDecl)) { - S.Context.setManglingNumber( - Tag, - MCtx->getManglingNumber(Tag, TagScope->getMSLocalManglingNumber())); + if (MangleNumberingContext *MCtx = getCurrentMangleNumberContext( + Tag->getDeclContext(), ManglingContextDecl)) { + Context.setManglingNumber( + Tag, MCtx->getManglingNumber( + Tag, getMSManglingNumber(getLangOpts(), TagScope))); + } +} + +void Sema::setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec, + TypedefNameDecl *NewTD) { + // Do nothing if the tag is not anonymous or already has an + // associated typedef (from an earlier typedef in this decl group). + if (TagFromDeclSpec->getIdentifier()) + return; + if (TagFromDeclSpec->getTypedefNameForAnonDecl()) + return; + + // A well-formed anonymous tag must always be a TUK_Definition. + assert(TagFromDeclSpec->isThisDeclarationADefinition()); + + // The type must match the tag exactly; no qualifiers allowed. + if (!Context.hasSameType(NewTD->getUnderlyingType(), + Context.getTagDeclType(TagFromDeclSpec))) + return; + + // If we've already computed linkage for the anonymous tag, then + // adding a typedef name for the anonymous decl can change that + // linkage, which might be a serious problem. Diagnose this as + // unsupported and ignore the typedef name. TODO: we should + // pursue this as a language defect and establish a formal rule + // for how to handle it. + if (TagFromDeclSpec->hasLinkageBeenComputed()) { + Diag(NewTD->getLocation(), diag::err_typedef_changes_linkage); + + SourceLocation tagLoc = TagFromDeclSpec->getInnerLocStart(); + tagLoc = getLocForEndOfToken(tagLoc); + + llvm::SmallString<40> textToInsert; + textToInsert += ' '; + textToInsert += NewTD->getIdentifier()->getName(); + Diag(tagLoc, diag::note_typedef_changes_linkage) + << FixItHint::CreateInsertion(tagLoc, textToInsert); + return; } + + // Otherwise, set this as the anon-decl typedef for the tag.
+ TagFromDeclSpec->setTypedefNameForAnonDecl(NewTD); } /// ParsedFreeStandingDeclSpec - This method is invoked when a declspec with @@ -3431,7 +3561,7 @@ Decl *Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, } if (Tag) { - HandleTagNumbering(*this, Tag, S); + handleTagNumbering(Tag, S); Tag->setFreeStanding(); if (Tag->isInvalidDecl()) return Tag; @@ -3471,7 +3601,7 @@ Decl *Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, return ActOnFriendTypeDecl(S, DS, TemplateParams); } - CXXScopeSpec &SS = DS.getTypeSpecScope(); + const CXXScopeSpec &SS = DS.getTypeSpecScope(); bool IsExplicitSpecialization = !TemplateParams.empty() && TemplateParams.back()->size() == 0; if (Tag && SS.isNotEmpty() && !Tag->isCompleteDefinition() && @@ -3498,7 +3628,8 @@ Decl *Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DS.getStorageClassSpec() != DeclSpec::SCS_typedef) { if (getLangOpts().CPlusPlus || Record->getDeclContext()->isRecord()) - return BuildAnonymousStructOrUnion(S, DS, AS, Record, Context.getPrintingPolicy()); + return BuildAnonymousStructOrUnion(S, DS, AS, Record, + Context.getPrintingPolicy()); DeclaresAnything = false; } @@ -3726,8 +3857,7 @@ static bool InjectAnonymousStructOrUnionMembers(Sema &SemaRef, Scope *S, // anonymous union is declared. unsigned OldChainingSize = Chaining.size(); if (IndirectFieldDecl *IF = dyn_cast<IndirectFieldDecl>(VD)) - for (auto *PI : IF->chain()) - Chaining.push_back(PI); + Chaining.append(IF->chain_begin(), IF->chain_end()); else Chaining.push_back(VD); @@ -4054,10 +4184,11 @@ Decl *Sema::BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS, if (VarDecl *NewVD = dyn_cast<VarDecl>(Anon)) { if (getLangOpts().CPlusPlus && NewVD->isStaticLocal()) { Decl *ManglingContextDecl; - if (MangleNumberingContext *MCtx = - getCurrentMangleNumberContext(NewVD->getDeclContext(), - ManglingContextDecl)) { - Context.setManglingNumber(NewVD, MCtx->getManglingNumber(NewVD, S->getMSLocalManglingNumber())); + if (MangleNumberingContext *MCtx = getCurrentMangleNumberContext( + NewVD->getDeclContext(), ManglingContextDecl)) { + Context.setManglingNumber( + NewVD, MCtx->getManglingNumber( + NewVD, getMSManglingNumber(getLangOpts(), S))); Context.setStaticLocalNumber(NewVD, MCtx->getStaticLocalNumber(NewVD)); } } @@ -4767,6 +4898,8 @@ static QualType TryToFixInvalidVariablyModifiedType(QualType T, static void FixInvalidVariablyModifiedTypeLoc(TypeLoc SrcTL, TypeLoc DstTL) { + SrcTL = SrcTL.getUnqualifiedLoc(); + DstTL = DstTL.getUnqualifiedLoc(); if (PointerTypeLoc SrcPTL = SrcTL.getAs<PointerTypeLoc>()) { PointerTypeLoc DstPTL = DstTL.castAs<PointerTypeLoc>(); FixInvalidVariablyModifiedTypeLoc(SrcPTL.getPointeeLoc(), @@ -4823,27 +4956,13 @@ Sema::RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S) { return; // Note that we have a locally-scoped external with this name. - // FIXME: There can be multiple such declarations if they are functions marked - // __attribute__((overloadable)) declared in function scope in C. - LocallyScopedExternCDecls[ND->getDeclName()] = ND; + Context.getExternCContextDecl()->makeDeclVisibleInContext(ND); } NamedDecl *Sema::findLocallyScopedExternCDecl(DeclarationName Name) { - if (ExternalSource) { - // Load locally-scoped external decls from the external source. - // FIXME: This is inefficient. Maybe add a DeclContext for extern "C" decls? 
- SmallVector<NamedDecl *, 4> Decls; - ExternalSource->ReadLocallyScopedExternCDecls(Decls); - for (unsigned I = 0, N = Decls.size(); I != N; ++I) { - llvm::DenseMap<DeclarationName, NamedDecl *>::iterator Pos - = LocallyScopedExternCDecls.find(Decls[I]->getDeclName()); - if (Pos == LocallyScopedExternCDecls.end()) - LocallyScopedExternCDecls[Decls[I]->getDeclName()] = Decls[I]; - } - } - - NamedDecl *D = LocallyScopedExternCDecls.lookup(Name); - return D ? D->getMostRecentDecl() : nullptr; + // FIXME: We can have multiple results via __attribute__((overloadable)). + auto Result = Context.getExternCContextDecl()->lookup(Name); + return Result.empty() ? nullptr : *Result.begin(); } /// \brief Diagnose function specifiers on a declaration of an identifier that @@ -4955,7 +5074,7 @@ Sema::ActOnTypedefNameDecl(Scope *S, DeclContext *DC, TypedefNameDecl *NewTD, // in an outer scope, it isn't the same thing. FilterLookupForScope(Previous, DC, S, /*ConsiderLinkage*/false, /*AllowInlineNamespace*/false); - filterNonConflictingPreviousTypedefDecls(Context, NewTD, Previous); + filterNonConflictingPreviousTypedefDecls(*this, NewTD, Previous); if (!Previous.empty()) { Redeclaration = true; MergeTypedefNameDecl(NewTD, Previous); @@ -5102,14 +5221,27 @@ static void checkAttributesAfterMerging(Sema &S, NamedDecl &ND) { if (ND.isExternallyVisible()) { S.Diag(Attr->getLocation(), diag::err_attribute_weakref_not_static); ND.dropAttr<WeakRefAttr>(); + ND.dropAttr<AliasAttr>(); } } - // 'selectany' only applies to externally visible varable declarations. + if (auto *VD = dyn_cast<VarDecl>(&ND)) { + if (VD->hasInit()) { + if (const auto *Attr = VD->getAttr<AliasAttr>()) { + assert(VD->isThisDeclarationADefinition() && + !VD->isExternallyVisible() && "Broken AliasAttr handled late!"); + S.Diag(Attr->getLocation(), diag::err_alias_is_definition) << VD; + VD->dropAttr<AliasAttr>(); + } + } + } + + // 'selectany' only applies to externally visible variable declarations. // It does not apply to functions. 
if (SelectAnyAttr *Attr = ND.getAttr<SelectAnyAttr>()) { if (isa<FunctionDecl>(ND) || !ND.isExternallyVisible()) { - S.Diag(Attr->getLocation(), diag::err_attribute_selectany_non_extern_data); + S.Diag(Attr->getLocation(), + diag::err_attribute_selectany_non_extern_data); ND.dropAttr<SelectAnyAttr>(); } } @@ -5565,8 +5697,9 @@ Sema::ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC, } } } else { - assert(D.getName().getKind() != UnqualifiedId::IK_TemplateId && - "should have a 'template<>' for this decl"); + assert( + (Invalid || D.getName().getKind() != UnqualifiedId::IK_TemplateId) && + "should have a 'template<>' for this decl"); } if (IsVariableTemplateSpecialization) { @@ -5627,6 +5760,7 @@ Sema::ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC, if (IsLocalExternDecl) NewVD->setLocalExternDecl(); + bool EmitTLSUnsupportedError = false; if (DeclSpec::TSCS TSCS = D.getDeclSpec().getThreadStorageClassSpec()) { // C++11 [dcl.stc]p4: // When thread_local is applied to a variable of block scope the @@ -5641,10 +5775,20 @@ Sema::ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC, Diag(D.getDeclSpec().getThreadStorageClassSpecLoc(), diag::err_thread_non_global) << DeclSpec::getSpecifierName(TSCS); - else if (!Context.getTargetInfo().isTLSSupported()) - Diag(D.getDeclSpec().getThreadStorageClassSpecLoc(), - diag::err_thread_unsupported); - else + else if (!Context.getTargetInfo().isTLSSupported()) { + if (getLangOpts().CUDA) { + // Postpone error emission until we've collected attributes required to + // figure out whether it's a host or device variable and whether the + // error should be ignored. + EmitTLSUnsupportedError = true; + // We still need to mark the variable as TLS so it shows up in AST with + // proper storage class for other tools to use even if we're not going + // to emit any code for it. + NewVD->setTSCSpec(TSCS); + } else + Diag(D.getDeclSpec().getThreadStorageClassSpecLoc(), + diag::err_thread_unsupported); + } else NewVD->setTSCSpec(TSCS); } @@ -5693,6 +5837,9 @@ Sema::ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC, ProcessDeclAttributes(S, NewVD, D); if (getLangOpts().CUDA) { + if (EmitTLSUnsupportedError && DeclAttrsMatchCUDAMode(getLangOpts(), NewVD)) + Diag(D.getDeclSpec().getThreadStorageClassSpecLoc(), + diag::err_thread_unsupported); // CUDA B.2.5: "__shared__ and __constant__ variables have implied static // storage [duration]." if (SC == SC_None && S->getFnParent() != nullptr && @@ -5845,11 +5992,11 @@ Sema::ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC, if (getLangOpts().CPlusPlus && NewVD->isStaticLocal()) { Decl *ManglingContextDecl; - if (MangleNumberingContext *MCtx = - getCurrentMangleNumberContext(NewVD->getDeclContext(), - ManglingContextDecl)) { + if (MangleNumberingContext *MCtx = getCurrentMangleNumberContext( + NewVD->getDeclContext(), ManglingContextDecl)) { Context.setManglingNumber( - NewVD, MCtx->getManglingNumber(NewVD, S->getMSLocalManglingNumber())); + NewVD, MCtx->getManglingNumber( + NewVD, getMSManglingNumber(getLangOpts(), S))); Context.setStaticLocalNumber(NewVD, MCtx->getStaticLocalNumber(NewVD)); } } @@ -6243,7 +6390,7 @@ bool Sema::CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous) { Previous.setShadowed(); // Filter out any non-conflicting previous declarations. 
- filterNonConflictingPreviousDecls(Context, NewVD, Previous); + filterNonConflictingPreviousDecls(*this, NewVD, Previous); if (!Previous.empty()) { MergeVarDecl(NewVD, Previous); @@ -6898,8 +7045,10 @@ static void checkIsValidOpenCLKernelParameter( // We have an error, now let's go back up through history and show where // the offending field came from - for (ArrayRef<const FieldDecl *>::const_iterator I = HistoryStack.begin() + 1, - E = HistoryStack.end(); I != E; ++I) { + for (ArrayRef<const FieldDecl *>::const_iterator + I = HistoryStack.begin() + 1, + E = HistoryStack.end(); + I != E; ++I) { const FieldDecl *OuterField = *I; S.Diag(OuterField->getLocation(), diag::note_within_field_of_type) << OuterField->getType(); @@ -7010,12 +7159,12 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC, // Check that we can declare a template here. if (CheckTemplateDeclScope(S, TemplateParams)) - return nullptr; + NewFD->setInvalidDecl(); // A destructor cannot be a template. if (Name.getNameKind() == DeclarationName::CXXDestructorName) { Diag(NewFD->getLocation(), diag::err_destructor_template); - return nullptr; + NewFD->setInvalidDecl(); } // If we're adding a template to a dependent context, we may need to @@ -7347,7 +7496,8 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC, NewFD->setInvalidDecl(); } - if (D.isFunctionDefinition() && CodeSegStack.CurrentValue && + // Apply an implicit SectionAttr if #pragma code_seg is active. + if (CodeSegStack.CurrentValue && D.isFunctionDefinition() && !NewFD->hasAttr<SectionAttr>()) { NewFD->addAttr( SectionAttr::CreateImplicit(Context, SectionAttr::Declspec_allocate, @@ -7363,23 +7513,10 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC, // Handle attributes. ProcessDeclAttributes(S, NewFD, D); - QualType RetType = NewFD->getReturnType(); - const CXXRecordDecl *Ret = RetType->isRecordType() ? - RetType->getAsCXXRecordDecl() : RetType->getPointeeCXXRecordDecl(); - if (!NewFD->isInvalidDecl() && !NewFD->hasAttr<WarnUnusedResultAttr>() && - Ret && Ret->hasAttr<WarnUnusedResultAttr>()) { - const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(NewFD); - // Attach WarnUnusedResult to functions returning types with that attribute. - // Don't apply the attribute to that type's own non-static member functions - // (to avoid warning on things like assignment operators) - if (!MD || MD->getParent() != Ret) - NewFD->addAttr(WarnUnusedResultAttr::CreateImplicit(Context)); - } - if (getLangOpts().OpenCL) { // OpenCL v1.1 s6.5: Using an address space qualifier in a function return // type declaration will generate a compilation error. - unsigned AddressSpace = RetType.getAddressSpace(); + unsigned AddressSpace = NewFD->getReturnType().getAddressSpace(); if (AddressSpace == LangAS::opencl_local || AddressSpace == LangAS::opencl_global || AddressSpace == LangAS::opencl_constant) { @@ -7402,7 +7539,7 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC, D.setRedeclaration(CheckFunctionDeclaration(S, NewFD, Previous, isExplicitSpecialization)); else if (!Previous.empty()) - // Make graceful recovery from an invalid redeclaration. + // Recover gracefully from an invalid redeclaration. 
D.setRedeclaration(true); assert((NewFD->isInvalidDecl() || !D.isRedeclaration() || Previous.getResultKind() != LookupResult::FoundOverloaded) && @@ -7543,6 +7680,9 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC, if (!NewFD->isInvalidDecl()) D.setRedeclaration(CheckFunctionDeclaration(S, NewFD, Previous, isExplicitSpecialization)); + else if (!Previous.empty()) + // Recover gracefully from an invalid redeclaration. + D.setRedeclaration(true); } assert((NewFD->isInvalidDecl() || !D.isRedeclaration() || @@ -7799,7 +7939,7 @@ bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD, !Previous.isShadowed(); // Filter out any non-conflicting previous declarations. - filterNonConflictingPreviousDecls(Context, NewFD, Previous); + filterNonConflictingPreviousDecls(*this, NewFD, Previous); bool Redeclaration = false; NamedDecl *OldDecl = nullptr; @@ -7854,7 +7994,7 @@ bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD, // Check for a previous extern "C" declaration with this name. if (!Redeclaration && checkForConflictWithNonVisibleExternC(*this, NewFD, Previous)) { - filterNonConflictingPreviousDecls(Context, NewFD, Previous); + filterNonConflictingPreviousDecls(*this, NewFD, Previous); if (!Previous.empty()) { // This is an extern "C" declaration with the same name as a previous // declaration, and thus redeclares that entity... @@ -7952,28 +8092,8 @@ bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD, // This needs to happen first so that 'inline' propagates. NewFD->setPreviousDeclaration(cast<FunctionDecl>(OldDecl)); - if (isa<CXXMethodDecl>(NewFD)) { - // A valid redeclaration of a C++ method must be out-of-line, - // but (unfortunately) it's not necessarily a definition - // because of templates, which means that the previous - // declaration is not necessarily from the class definition. - - // For just setting the access, that doesn't matter. - CXXMethodDecl *oldMethod = cast<CXXMethodDecl>(OldDecl); - NewFD->setAccess(oldMethod->getAccess()); - - // Update the key-function state if necessary for this ABI. - if (NewFD->isInlined() && - !Context.getTargetInfo().getCXXABI().canKeyFunctionBeInline()) { - // setNonKeyFunction needs to work with the original - // declaration from the class definition, and isVirtual() is - // just faster in that case, so map back to that now. - oldMethod = cast<CXXMethodDecl>(oldMethod->getFirstDecl()); - if (oldMethod->isVirtual()) { - Context.setNonKeyFunction(oldMethod); - } - } - } + if (isa<CXXMethodDecl>(NewFD)) + NewFD->setAccess(OldDecl->getAccess()); } } @@ -8153,6 +8273,12 @@ void Sema::CheckMain(FunctionDecl* FD, const DeclSpec& DS) { bool HasExtraParameters = (nparams > 3); + if (FTP->isVariadic()) { + Diag(FD->getLocation(), diag::ext_variadic_main); + // FIXME: if we had information about the location of the ellipsis, we + // could add a FixIt hint to remove it as a parameter. + } + // Darwin passes an undocumented fourth argument of type char**. If // other platforms start sprouting these, the logic below will start // getting shifty. 
@@ -8543,8 +8669,13 @@ namespace { diag = diag::warn_uninit_self_reference_in_reference_init; } else if (cast<VarDecl>(OrigDecl)->isStaticLocal()) { diag = diag::warn_static_self_reference_in_init; - } else { + } else if (isa<TranslationUnitDecl>(OrigDecl->getDeclContext()) || + isa<NamespaceDecl>(OrigDecl->getDeclContext()) || + DRE->getDecl()->getType()->isRecordType()) { diag = diag::warn_uninit_self_reference_in_init; + } else { + // Local variables will be handled by the CFG analysis. + return; } S.DiagRuntimeBehavior(DRE->getLocStart(), DRE, @@ -8586,7 +8717,7 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, // If there is no declaration, there was an error parsing it. Just ignore // the initializer. if (!RealDecl || RealDecl->isInvalidDecl()) { - CorrectDelayedTyposInExpr(Init); + CorrectDelayedTyposInExpr(Init, dyn_cast_or_null<VarDecl>(RealDecl)); return; } @@ -8617,6 +8748,21 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, // C++11 [decl.spec.auto]p6. Deduce the type which 'auto' stands in for. if (TypeMayContainAuto && VDecl->getType()->isUndeducedType()) { + // Attempt typo correction early so that the type of the init expression can + // be deduced based on the chosen correction:if the original init contains a + // TypoExpr. + ExprResult Res = CorrectDelayedTyposInExpr(Init, VDecl); + if (!Res.isUsable()) { + RealDecl->setInvalidDecl(); + return; + } + + if (Res.get() != Init) { + Init = Res.get(); + if (CXXDirectInit) + CXXDirectInit = dyn_cast<ParenListExpr>(Init); + } + Expr *DeduceInit = Init; // Initializer could be a C++ direct-initializer. Deduction only works if it // contains exactly one expression. @@ -8746,16 +8892,24 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, VDecl->setInvalidDecl(); } - const VarDecl *Def; + VarDecl *Def; if ((Def = VDecl->getDefinition()) && Def != VDecl) { - Diag(VDecl->getLocation(), diag::err_redefinition) - << VDecl->getDeclName(); - Diag(Def->getLocation(), diag::note_previous_definition); - VDecl->setInvalidDecl(); - return; + NamedDecl *Hidden = nullptr; + if (!hasVisibleDefinition(Def, &Hidden) && + (VDecl->getDescribedVarTemplate() || + VDecl->getNumTemplateParameterLists() || + VDecl->getDeclContext()->isDependentContext())) { + // The previous definition is hidden, and multiple definitions are + // permitted (in separate TUs). Form another definition of it. + } else { + Diag(VDecl->getLocation(), diag::err_redefinition) + << VDecl->getDeclName(); + Diag(Def->getLocation(), diag::note_previous_definition); + VDecl->setInvalidDecl(); + return; + } } - const VarDecl *PrevInit = nullptr; if (getLangOpts().CPlusPlus) { // C++ [class.static.data]p4 // If a static data member is of const integral or const @@ -8769,10 +8923,12 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, // We already performed a redefinition check above, but for static // data members we also need to check whether there was an in-class // declaration with an initializer. 
- if (VDecl->isStaticDataMember() && VDecl->getAnyInitializer(PrevInit)) { + if (VDecl->isStaticDataMember() && VDecl->getCanonicalDecl()->hasInit()) { Diag(Init->getExprLoc(), diag::err_static_data_member_reinitialization) << VDecl->getDeclName(); - Diag(PrevInit->getInit()->getExprLoc(), diag::note_previous_initializer) << 0; + Diag(VDecl->getCanonicalDecl()->getInit()->getExprLoc(), + diag::note_previous_initializer) + << 0; return; } @@ -8829,8 +8985,8 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, // Try to correct any TypoExprs in the initialization arguments. for (size_t Idx = 0; Idx < Args.size(); ++Idx) { - ExprResult Res = - CorrectDelayedTyposInExpr(Args[Idx], [this, Entity, Kind](Expr *E) { + ExprResult Res = CorrectDelayedTyposInExpr( + Args[Idx], VDecl, [this, Entity, Kind](Expr *E) { InitializationSequence Init(*this, Entity, Kind, MultiExprArg(E)); return Init.Failed() ? ExprError() : E; }); @@ -9253,6 +9409,8 @@ void Sema::ActOnUninitializedDecl(Decl *RealDecl, Var->setInvalidDecl(); return; } + } else { + return; } // The variable can not have an abstract class type. @@ -9350,8 +9508,6 @@ void Sema::ActOnCXXForRangeDecl(Decl *D) { case SC_OpenCLWorkGroupLocal: llvm_unreachable("Unexpected storage class"); } - if (VD->isConstexpr()) - Error = 5; if (Error != -1) { Diag(VD->getOuterLocStart(), diag::err_for_range_storage_class) << VD->getDeclName() << Error; @@ -9453,7 +9609,9 @@ void Sema::CheckCompleteVariableDeclaration(VarDecl *var) { } - if (var->isThisDeclarationADefinition() && + // Apply section attributes and pragmas to global variables. + bool GlobalStorage = var->hasGlobalStorage(); + if (GlobalStorage && var->isThisDeclarationADefinition() && ActiveTemplateInstantiations.empty()) { PragmaStack<StringLiteral *> *Stack = nullptr; int SectionFlags = ASTContext::PSF_Implicit | ASTContext::PSF_Read; @@ -9466,11 +9624,11 @@ void Sema::CheckCompleteVariableDeclaration(VarDecl *var) { Stack = &DataSegStack; SectionFlags |= ASTContext::PSF_Write; } - if (!var->hasAttr<SectionAttr>() && Stack->CurrentValue) - var->addAttr( - SectionAttr::CreateImplicit(Context, SectionAttr::Declspec_allocate, - Stack->CurrentValue->getString(), - Stack->CurrentPragmaLocation)); + if (Stack->CurrentValue && !var->hasAttr<SectionAttr>()) { + var->addAttr(SectionAttr::CreateImplicit( + Context, SectionAttr::Declspec_allocate, + Stack->CurrentValue->getString(), Stack->CurrentPragmaLocation)); + } if (const SectionAttr *SA = var->getAttr<SectionAttr>()) if (UnifySection(SA->getName(), SectionFlags, var)) var->dropAttr<SectionAttr>(); @@ -9513,7 +9671,7 @@ void Sema::CheckCompleteVariableDeclaration(VarDecl *var) { } Expr *Init = var->getInit(); - bool IsGlobal = var->hasGlobalStorage() && !var->isStaticLocal(); + bool IsGlobal = GlobalStorage && !var->isStaticLocal(); QualType baseType = Context.getBaseElementType(type); if (!var->getDeclContext()->isDependentContext() && @@ -9625,18 +9783,6 @@ Sema::FinalizeDeclaration(Decl *ThisDecl) { } } - if (!VD->isInvalidDecl() && - VD->isThisDeclarationADefinition() == VarDecl::TentativeDefinition) { - if (const VarDecl *Def = VD->getDefinition()) { - if (Def->hasAttr<AliasAttr>()) { - Diag(VD->getLocation(), diag::err_tentative_after_alias) - << VD->getDeclName(); - Diag(Def->getLocation(), diag::note_previous_definition); - VD->setInvalidDecl(); - } - } - } - const DeclContext *DC = VD->getDeclContext(); // If there's a #pragma GCC visibility in scope, and this isn't a class // member, set the visibility of this variable. 
@@ -9699,7 +9845,7 @@ Sema::DeclGroupPtrTy Sema::FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS, if (DeclSpec::isDeclRep(DS.getTypeSpecType())) { if (TagDecl *Tag = dyn_cast_or_null<TagDecl>(DS.getRepAsDecl())) { - HandleTagNumbering(*this, Tag, S); + handleTagNumbering(Tag, S); if (!Tag->hasNameForLinkage() && !Tag->hasDeclaratorForAnonDecl()) Tag->setDeclaratorForAnonDecl(FirstDeclaratorInGroup); } @@ -9766,9 +9912,12 @@ void Sema::ActOnDocumentableDecl(Decl *D) { void Sema::ActOnDocumentableDecls(ArrayRef<Decl *> Group) { // Don't parse the comment if Doxygen diagnostics are ignored. if (Group.empty() || !Group[0]) - return; + return; - if (Diags.isIgnored(diag::warn_doc_param_not_found, Group[0]->getLocation())) + if (Diags.isIgnored(diag::warn_doc_param_not_found, + Group[0]->getLocation()) && + Diags.isIgnored(diag::warn_unknown_comment_command_name, + Group[0]->getLocation())) return; if (Group.size() >= 2) { @@ -10058,7 +10207,7 @@ void Sema::ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D, << " int " << FTI.Params[i].Ident->getName() << ";\n"; Diag(FTI.Params[i].IdentLoc, diag::ext_param_not_declared) << FTI.Params[i].Ident - << FixItHint::CreateInsertion(LocAfterDecls, Code.str()); + << FixItHint::CreateInsertion(LocAfterDecls, Code); // Implicitly declare the argument as type 'int' for lack of a better // type. @@ -10127,6 +10276,10 @@ static bool ShouldWarnAboutMissingPrototype(const FunctionDecl *FD, if (FD->hasAttr<OpenCLKernelAttr>()) return false; + // Don't warn on explicitly deleted functions. + if (FD->isDeleted()) + return false; + bool MissingPrototype = true; for (const FunctionDecl *Prev = FD->getPreviousDecl(); Prev; Prev = Prev->getPreviousDecl()) { @@ -10157,6 +10310,18 @@ Sema::CheckForFunctionRedefinition(FunctionDecl *FD, if (canRedefineFunction(Definition, getLangOpts())) return; + // If we don't have a visible definition of the function, and it's inline or + // a template, it's OK to form another definition of it. + // + // FIXME: Should we skip the body of the function and use the old definition + // in this case? That may be necessary for functions that return local types + // through a deduced return type, or instantiate templates with local types. + if (!hasVisibleDefinition(Definition) && + (Definition->isInlineSpecified() || + Definition->getDescribedFunctionTemplate() || + Definition->getNumTemplateParameterLists())) + return; + if (getLangOpts().GNUMode && Definition->isInlineSpecified() && Definition->getStorageClass() == SC_Extern) Diag(FD->getLocation(), diag::err_redefinition_extern_inline) @@ -10269,30 +10434,6 @@ Decl *Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, Decl *D) { diag::err_func_def_incomplete_result)) FD->setInvalidDecl(); - // GNU warning -Wmissing-prototypes: - // Warn if a global function is defined without a previous - // prototype declaration. This warning is issued even if the - // definition itself provides a prototype. The aim is to detect - // global functions that fail to be declared in header files. 
- const FunctionDecl *PossibleZeroParamPrototype = nullptr; - if (ShouldWarnAboutMissingPrototype(FD, PossibleZeroParamPrototype)) { - Diag(FD->getLocation(), diag::warn_missing_prototype) << FD; - - if (PossibleZeroParamPrototype) { - // We found a declaration that is not a prototype, - // but that could be a zero-parameter prototype - if (TypeSourceInfo *TI = - PossibleZeroParamPrototype->getTypeSourceInfo()) { - TypeLoc TL = TI->getTypeLoc(); - if (FunctionNoProtoTypeLoc FTL = TL.getAs<FunctionNoProtoTypeLoc>()) - Diag(PossibleZeroParamPrototype->getLocation(), - diag::note_declaration_not_a_prototype) - << PossibleZeroParamPrototype - << FixItHint::CreateInsertion(FTL.getRParenLoc(), "void"); - } - } - } - if (FnBodyScope) PushDeclContext(FnBodyScope, FD); @@ -10321,9 +10462,9 @@ Decl *Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, Decl *D) { I != E; ++I) { NamedDecl *D = *I; - // Some of these decls (like enums) may have been pinned to the translation unit - // for lack of a real context earlier. If so, remove from the translation unit - // and reattach to the current context. + // Some of these decls (like enums) may have been pinned to the + // translation unit for lack of a real context earlier. If so, remove + // from the translation unit and reattach to the current context. if (D->getLexicalDeclContext() == Context.getTranslationUnitDecl()) { // Is the decl actually in the context? for (const auto *DI : Context.getTranslationUnitDecl()->decls()) { @@ -10468,6 +10609,23 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body, Context.adjustDeducedFunctionResultType( FD, SubstAutoType(ResultType.getType(), Context.VoidTy)); } + } else if (getLangOpts().CPlusPlus11 && isLambdaCallOperator(FD)) { + auto *LSI = getCurLambda(); + if (LSI->HasImplicitReturnType) { + deduceClosureReturnType(*LSI); + + // C++11 [expr.prim.lambda]p4: + // [...] if there are no return statements in the compound-statement + // [the deduced type is] the type void + QualType RetType = + LSI->ReturnType.isNull() ? Context.VoidTy : LSI->ReturnType; + + // Update the return type to the deduced type. + const FunctionProtoType *Proto = + FD->getType()->getAs<FunctionProtoType>(); + FD->setType(Context.getFunctionType(RetType, Proto->getParamTypes(), + Proto->getExtProtoInfo())); + } } // The only way to be included in UndefinedButUsed is if there is an @@ -10477,7 +10635,7 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body, if (!FD->isExternallyVisible()) UndefinedButUsed.erase(FD); else if (FD->isInlined() && - (LangOpts.CPlusPlus || !LangOpts.GNUInline) && + !LangOpts.GNUInline && (!FD->getPreviousDecl()->hasAttr<GNUInlineAttr>())) UndefinedButUsed.erase(FD); } @@ -10494,7 +10652,7 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body, if (!FD->isInvalidDecl()) { // Don't diagnose unused parameters of defaulted or deleted functions. - if (Body) + if (!FD->isDeleted() && !FD->isDefaulted()) DiagnoseUnusedParameters(FD->param_begin(), FD->param_end()); DiagnoseSizeOfParametersAndReturnValue(FD->param_begin(), FD->param_end(), FD->getReturnType(), FD); @@ -10512,7 +10670,55 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body, !FD->isDependentContext()) computeNRVO(Body, getCurFunction()); } - + + // GNU warning -Wmissing-prototypes: + // Warn if a global function is defined without a previous + // prototype declaration. This warning is issued even if the + // definition itself provides a prototype. The aim is to detect + // global functions that fail to be declared in header files. 
+ const FunctionDecl *PossibleZeroParamPrototype = nullptr; + if (ShouldWarnAboutMissingPrototype(FD, PossibleZeroParamPrototype)) { + Diag(FD->getLocation(), diag::warn_missing_prototype) << FD; + + if (PossibleZeroParamPrototype) { + // We found a declaration that is not a prototype, + // but that could be a zero-parameter prototype + if (TypeSourceInfo *TI = + PossibleZeroParamPrototype->getTypeSourceInfo()) { + TypeLoc TL = TI->getTypeLoc(); + if (FunctionNoProtoTypeLoc FTL = TL.getAs<FunctionNoProtoTypeLoc>()) + Diag(PossibleZeroParamPrototype->getLocation(), + diag::note_declaration_not_a_prototype) + << PossibleZeroParamPrototype + << FixItHint::CreateInsertion(FTL.getRParenLoc(), "void"); + } + } + } + + if (auto *MD = dyn_cast<CXXMethodDecl>(FD)) { + const CXXMethodDecl *KeyFunction; + if (MD->isOutOfLine() && (MD = MD->getCanonicalDecl()) && + MD->isVirtual() && + (KeyFunction = Context.getCurrentKeyFunction(MD->getParent())) && + MD == KeyFunction->getCanonicalDecl()) { + // Update the key-function state if necessary for this ABI. + if (FD->isInlined() && + !Context.getTargetInfo().getCXXABI().canKeyFunctionBeInline()) { + Context.setNonKeyFunction(MD); + + // If the newly-chosen key function is already defined, then we + // need to mark the vtable as used retroactively. + KeyFunction = Context.getCurrentKeyFunction(MD->getParent()); + const FunctionDecl *Definition; + if (KeyFunction && KeyFunction->isDefined(Definition)) + MarkVTableUsed(Definition->getLocation(), MD->getParent(), true); + } else { + // We just defined they key function; mark the vtable as used. + MarkVTableUsed(FD->getLocation(), MD->getParent(), true); + } + } + } + assert((FD == getCurFunctionDecl() || getCurLambda()->CallOperator == FD) && "Function parsing confused"); } else if (ObjCMethodDecl *MD = dyn_cast_or_null<ObjCMethodDecl>(dcl)) { @@ -10561,7 +10767,8 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body, if (getCurFunction()->ObjCWarnForNoInitDelegation) { // Don't issue this warning for unavaialable inits. if (!MD->isUnavailable()) - Diag(MD->getLocation(), diag::warn_objc_secondary_init_missing_init_call); + Diag(MD->getLocation(), + diag::warn_objc_secondary_init_missing_init_call); getCurFunction()->ObjCWarnForNoInitDelegation = false; } } else { @@ -10573,7 +10780,7 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body, "handled in the block above."); // Verify and clean out per-function state. - if (Body) { + if (Body && (!FD || !FD->isDefaulted())) { // C++ constructors that have function-try-blocks can't have return // statements in the handlers of that block. (C++ [except.handle]p14) // Verify this. @@ -10623,8 +10830,9 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body, } } - assert(ExprCleanupObjects.size() == ExprEvalContexts.back().NumCleanupObjects - && "Leftover temporaries in function"); + assert(ExprCleanupObjects.size() == + ExprEvalContexts.back().NumCleanupObjects && + "Leftover temporaries in function"); assert(!ExprNeedsCleanups && "Unaccounted cleanups in function"); assert(MaybeODRUseExprs.empty() && "Leftover expressions for odr-use checking"); @@ -10888,44 +11096,10 @@ TypedefDecl *Sema::ParseTypedefDecl(Scope *S, Declarator &D, QualType T, case TST_union: case TST_class: { TagDecl *tagFromDeclSpec = cast<TagDecl>(D.getDeclSpec().getRepAsDecl()); - - // Do nothing if the tag is not anonymous or already has an - // associated typedef (from an earlier typedef in this decl group). 
- if (tagFromDeclSpec->getIdentifier()) break; - if (tagFromDeclSpec->getTypedefNameForAnonDecl()) break; - - // A well-formed anonymous tag must always be a TUK_Definition. - assert(tagFromDeclSpec->isThisDeclarationADefinition()); - - // The type must match the tag exactly; no qualifiers allowed. - if (!Context.hasSameType(T, Context.getTagDeclType(tagFromDeclSpec))) - break; - - // If we've already computed linkage for the anonymous tag, then - // adding a typedef name for the anonymous decl can change that - // linkage, which might be a serious problem. Diagnose this as - // unsupported and ignore the typedef name. TODO: we should - // pursue this as a language defect and establish a formal rule - // for how to handle it. - if (tagFromDeclSpec->hasLinkageBeenComputed()) { - Diag(D.getIdentifierLoc(), diag::err_typedef_changes_linkage); - - SourceLocation tagLoc = D.getDeclSpec().getTypeSpecTypeLoc(); - tagLoc = getLocForEndOfToken(tagLoc); - - llvm::SmallString<40> textToInsert; - textToInsert += ' '; - textToInsert += D.getIdentifier()->getName(); - Diag(tagLoc, diag::note_typedef_changes_linkage) - << FixItHint::CreateInsertion(tagLoc, textToInsert); - break; - } - - // Otherwise, set this is the anon-decl typedef for the tag. - tagFromDeclSpec->setTypedefNameForAnonDecl(NewTD); + setTagNameForLinkagePurposes(tagFromDeclSpec, NewTD); break; } - + default: break; } @@ -11146,13 +11320,16 @@ static FixItHint createFriendTagNNSFixIt(Sema &SemaRef, NamedDecl *ND, Scope *S, return FixItHint::CreateInsertion(NameLoc, Insertion); } -/// ActOnTag - This is invoked when we see 'struct foo' or 'struct {'. In the +/// \brief This is invoked when we see 'struct foo' or 'struct {'. In the /// former case, Name will be non-null. In the later case, Name will be null. /// TagSpec indicates what kind of tag this is. TUK indicates whether this is a /// reference/declaration/definition of a tag. /// -/// IsTypeSpecifier is true if this is a type-specifier (or +/// \param IsTypeSpecifier \c true if this is a type-specifier (or /// trailing-type-specifier) other than one in an alias-declaration. +/// +/// \param SkipBody If non-null, will be set to indicate if the caller should +/// skip the definition of this tag and treat it as if it were a declaration. Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, @@ -11163,7 +11340,7 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation ScopedEnumKWLoc, bool ScopedEnumUsesClassTag, TypeResult UnderlyingType, - bool IsTypeSpecifier) { + bool IsTypeSpecifier, SkipBodyInfo *SkipBody) { // If this is not a definition, it must have a name. IdentifierInfo *OrigName = Name; assert((Name != nullptr || TUK == TUK_Definition) && @@ -11206,7 +11383,8 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, ModulePrivateLoc, /*FriendLoc*/SourceLocation(), TemplateParameterLists.size()-1, - TemplateParameterLists.data()); + TemplateParameterLists.data(), + SkipBody); return Result.get(); } else { // The "template<>" header is extraneous. @@ -11472,6 +11650,10 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, } } + // If we have a known previous declaration to use, then use it. 
+ if (Previous.empty() && SkipBody && SkipBody->Previous) + Previous.addDecl(SkipBody->Previous); + if (!Previous.empty()) { NamedDecl *PrevDecl = Previous.getFoundDecl(); NamedDecl *DirectPrevDecl = @@ -11589,7 +11771,7 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, // Diagnose attempts to redefine a tag. if (TUK == TUK_Definition) { - if (TagDecl *Def = PrevTagDecl->getDefinition()) { + if (NamedDecl *Def = PrevTagDecl->getDefinition()) { // If we're defining a specialization and the previous definition // is from an implicit instantiation, don't emit an error // here; we'll catch this in the general case below. @@ -11605,7 +11787,18 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, TSK_ExplicitSpecialization; } - if (!IsExplicitSpecializationAfterInstantiation) { + NamedDecl *Hidden = nullptr; + if (SkipBody && getLangOpts().CPlusPlus && + !hasVisibleDefinition(Def, &Hidden)) { + // There is a definition of this tag, but it is not visible. We + // explicitly make use of C++'s one definition rule here, and + // assume that this definition is identical to the hidden one + // we already have. Make the existing definition visible and + // use it in place of this one. + SkipBody->ShouldSkip = true; + makeMergedDefinitionVisible(Hidden, KWLoc); + return Def; + } else if (!IsExplicitSpecializationAfterInstantiation) { // A redeclaration in function prototype scope in C isn't // visible elsewhere, so merely issue a warning. if (!getLangOpts().CPlusPlus && S->containedInPrototypeScope()) @@ -12339,8 +12532,10 @@ FieldDecl *Sema::CheckFieldDecl(DeclarationName Name, QualType T, InvalidDecl = true; bool ZeroWidth = false; + if (InvalidDecl) + BitWidth = nullptr; // If this is declared as a bit-field, check the bit-field. - if (!InvalidDecl && BitWidth) { + if (BitWidth) { BitWidth = VerifyBitField(Loc, II, T, Record->isMsStruct(Context), BitWidth, &ZeroWidth).get(); if (!BitWidth) { @@ -12354,7 +12549,8 @@ FieldDecl *Sema::CheckFieldDecl(DeclarationName Name, QualType T, if (!InvalidDecl && Mutable) { unsigned DiagID = 0; if (T->isReferenceType()) - DiagID = diag::err_mutable_reference; + DiagID = getLangOpts().MSVCCompat ? diag::ext_mutable_reference + : diag::err_mutable_reference; else if (T.isConstQualified()) DiagID = diag::err_mutable_const; @@ -12363,8 +12559,10 @@ FieldDecl *Sema::CheckFieldDecl(DeclarationName Name, QualType T, if (D && D->getDeclSpec().getStorageClassSpecLoc().isValid()) ErrLoc = D->getDeclSpec().getStorageClassSpecLoc(); Diag(ErrLoc, DiagID); - Mutable = false; - InvalidDecl = true; + if (DiagID != diag::ext_mutable_reference) { + Mutable = false; + InvalidDecl = true; + } } } @@ -13286,6 +13484,29 @@ EnumConstantDecl *Sema::CheckEnumConstant(EnumDecl *Enum, Val, EnumVal); } +Sema::SkipBodyInfo Sema::shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II, + SourceLocation IILoc) { + if (!getLangOpts().Modules || !getLangOpts().CPlusPlus) + return SkipBodyInfo(); + + // We have an anonymous enum definition. Look up the first enumerator to + // determine if we should merge the definition with an existing one and + // skip the body. 
+ NamedDecl *PrevDecl = LookupSingleName(S, II, IILoc, LookupOrdinaryName, + ForRedeclaration); + auto *PrevECD = dyn_cast_or_null<EnumConstantDecl>(PrevDecl); + NamedDecl *Hidden; + if (PrevECD && + !hasVisibleDefinition(cast<NamedDecl>(PrevECD->getDeclContext()), + &Hidden)) { + SkipBodyInfo Skip; + Skip.ShouldSkip = true; + Skip.Previous = Hidden; + return Skip; + } + + return SkipBodyInfo(); +} Decl *Sema::ActOnEnumConstant(Scope *S, Decl *theEnumDecl, Decl *lastEnumConst, SourceLocation IdLoc, IdentifierInfo *Id, @@ -13528,6 +13749,49 @@ static void CheckForDuplicateEnumValues(Sema &S, ArrayRef<Decl *> Elements, } } +bool +Sema::IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val, + bool AllowMask) const { + FlagEnumAttr *FEAttr = ED->getAttr<FlagEnumAttr>(); + assert(FEAttr && "looking for value in non-flag enum"); + + llvm::APInt FlagMask = ~FEAttr->getFlagBits(); + unsigned Width = FlagMask.getBitWidth(); + + // We will try a zero-extended value for the regular check first. + llvm::APInt ExtVal = Val.zextOrSelf(Width); + + // A value is in a flag enum if either its bits are a subset of the enum's + // flag bits (the first condition) or we are allowing masks and the same is + // true of its complement (the second condition). When masks are allowed, we + // allow the common idiom of ~(enum1 | enum2) to be a valid enum value. + // + // While it's true that any value could be used as a mask, the assumption is + // that a mask will have all of the insignificant bits set. Anything else is + // likely a logic error. + if (!(FlagMask & ExtVal)) + return true; + + if (AllowMask) { + // Try a one-extended value instead. This can happen if the enum is wider + // than the constant used, in C with extensions to allow for wider enums. + // The mask will still have the correct behaviour, so we give the user the + // benefit of the doubt. + // + // FIXME: This heuristic can cause weird results if the enum was extended + // to a larger type and is signed, because then bit-masks of smaller types + // that get extended will fall out of range (e.g. ~0x1u). We currently don't + // detect that case and will get a false positive for it. In most cases, + // though, it can be fixed by making it a signed type (e.g. ~0x1), so it may + // be fine just to accept this as a warning. + ExtVal |= llvm::APInt::getHighBitsSet(Width, Width - Val.getBitWidth()); + if (!(FlagMask & ~ExtVal)) + return true; + } + + return false; +} + void Sema::ActOnEnumBody(SourceLocation EnumLoc, SourceLocation LBraceLoc, SourceLocation RBraceLoc, Decl *EnumDeclX, ArrayRef<Decl *> Elements, @@ -13613,10 +13877,8 @@ void Sema::ActOnEnumBody(SourceLocation EnumLoc, SourceLocation LBraceLoc, BestPromotionType = Context.getPromotedIntegerType(BestType); else BestPromotionType = BestType; - // We don't need to set BestWidth, because BestType is going to be the type - // of the enumerators, but we do anyway because otherwise some compilers - // warn that it might be used uninitialized. - BestWidth = CharWidth; + + BestWidth = Context.getIntWidth(BestType); } else if (NumNegativeBits) { // If there is a negative value, figure out the smallest integer type (of @@ -13681,10 +13943,15 @@ void Sema::ActOnEnumBody(SourceLocation EnumLoc, SourceLocation LBraceLoc, } } + FlagEnumAttr *FEAttr = Enum->getAttr<FlagEnumAttr>(); + if (FEAttr) + FEAttr->getFlagBits() = llvm::APInt(BestWidth, 0); + // Loop over all of the enumerator constants, changing their types to match - // the type of the enum if needed. 
- for (unsigned i = 0, e = Elements.size(); i != e; ++i) { - EnumConstantDecl *ECD = cast_or_null<EnumConstantDecl>(Elements[i]); + // the type of the enum if needed. If we have a flag type, we also prepare the + // FlagBits cache. + for (auto *D : Elements) { + auto *ECD = cast_or_null<EnumConstantDecl>(D); if (!ECD) continue; // Already issued a diagnostic. // Standard C says the enumerators have int type, but we allow, as an @@ -13714,7 +13981,7 @@ void Sema::ActOnEnumBody(SourceLocation EnumLoc, SourceLocation LBraceLoc, // enum-specifier, each enumerator has the type of its // enumeration. ECD->setType(EnumType); - continue; + goto flagbits; } else { NewTy = BestType; NewWidth = BestWidth; @@ -13741,8 +14008,32 @@ void Sema::ActOnEnumBody(SourceLocation EnumLoc, SourceLocation LBraceLoc, ECD->setType(EnumType); else ECD->setType(NewTy); + +flagbits: + // Check to see if we have a constant with exactly one bit set. Note that x + // & (x - 1) will be nonzero if and only if x has more than one bit set. + if (FEAttr) { + llvm::APInt ExtVal = InitVal.zextOrSelf(BestWidth); + if (ExtVal != 0 && !(ExtVal & (ExtVal - 1))) { + FEAttr->getFlagBits() |= ExtVal; + } + } } + if (FEAttr) { + for (Decl *D : Elements) { + EnumConstantDecl *ECD = cast_or_null<EnumConstantDecl>(D); + if (!ECD) continue; // Already issued a diagnostic. + + llvm::APSInt InitVal = ECD->getInitVal(); + if (InitVal != 0 && !IsValueInFlagEnum(Enum, InitVal, true)) + Diag(ECD->getLocation(), diag::warn_flag_enum_constant_out_of_range) + << ECD << Enum; + } + } + + + Enum->completeDefinition(BestType, BestPromotionType, NumPositiveBits, NumNegativeBits); @@ -13804,6 +14095,8 @@ DeclResult Sema::ActOnModuleImport(SourceLocation AtLoc, if (!Mod) return true; + VisibleModules.setVisible(Mod, ImportLoc); + checkModuleImportContext(*this, Mod, ImportLoc, CurContext); // FIXME: we should support importing a submodule within a different submodule @@ -13839,9 +14132,46 @@ DeclResult Sema::ActOnModuleImport(SourceLocation AtLoc, void Sema::ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod) { checkModuleImportContext(*this, Mod, DirectiveLoc, CurContext); - // FIXME: Should we synthesize an ImportDecl here? - getModuleLoader().makeModuleVisible(Mod, Module::AllVisible, DirectiveLoc, - /*Complain=*/true); + // Determine whether we're in the #include buffer for a module. The #includes + // in that buffer do not qualify as module imports; they're just an + // implementation detail of us building the module. + // + // FIXME: Should we even get ActOnModuleInclude calls for those? + bool IsInModuleIncludes = + TUKind == TU_Module && + getSourceManager().isWrittenInMainFile(DirectiveLoc); + + // If this module import was due to an inclusion directive, create an + // implicit import declaration to capture it in the AST. 
+ if (!IsInModuleIncludes) { + TranslationUnitDecl *TU = getASTContext().getTranslationUnitDecl(); + ImportDecl *ImportD = ImportDecl::CreateImplicit(getASTContext(), TU, + DirectiveLoc, Mod, + DirectiveLoc); + TU->addDecl(ImportD); + Consumer.HandleImplicitImportDecl(ImportD); + } + + getModuleLoader().makeModuleVisible(Mod, Module::AllVisible, DirectiveLoc); + VisibleModules.setVisible(Mod, DirectiveLoc); +} + +void Sema::ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod) { + checkModuleImportContext(*this, Mod, DirectiveLoc, CurContext); + + if (getLangOpts().ModulesLocalVisibility) + VisibleModulesStack.push_back(std::move(VisibleModules)); + VisibleModules.setVisible(Mod, DirectiveLoc); +} + +void Sema::ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod) { + checkModuleImportContext(*this, Mod, DirectiveLoc, CurContext); + + if (getLangOpts().ModulesLocalVisibility) { + VisibleModules = std::move(VisibleModulesStack.back()); + VisibleModulesStack.pop_back(); + VisibleModules.setVisible(Mod, DirectiveLoc); + } } void Sema::createImplicitModuleImportForErrorRecovery(SourceLocation Loc, @@ -13858,8 +14188,8 @@ void Sema::createImplicitModuleImportForErrorRecovery(SourceLocation Loc, Consumer.HandleImplicitImportDecl(ImportD); // Make the module visible. - getModuleLoader().makeModuleVisible(Mod, Module::AllVisible, Loc, - /*Complain=*/false); + getModuleLoader().makeModuleVisible(Mod, Module::AllVisible, Loc); + VisibleModules.setVisible(Mod, Loc); } void Sema::ActOnPragmaRedefineExtname(IdentifierInfo* Name, @@ -13917,7 +14247,10 @@ Decl *Sema::getObjCDeclContext() const { } AvailabilityResult Sema::getCurContextAvailability() const { - const Decl *D = cast<Decl>(getCurObjCLexicalContext()); + const Decl *D = cast_or_null<Decl>(getCurObjCLexicalContext()); + if (!D) + return AR_Available; + // If we are within an Objective-C method, we should consult // both the availability of the method as well as the // enclosing class. If the class is (say) deprecated, diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaDeclAttr.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaDeclAttr.cpp index d7ce6f1..31fe055 100644 --- a/contrib/llvm/tools/clang/lib/Sema/SemaDeclAttr.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/SemaDeclAttr.cpp @@ -51,6 +51,11 @@ namespace AttributeLangSupport { static bool isFunctionOrMethod(const Decl *D) { return (D->getFunctionType() != nullptr) || isa<ObjCMethodDecl>(D); } +/// \brief Return true if the given decl has function type (function or +/// function-typed variable) or an Objective-C method or a block. +static bool isFunctionOrMethodOrBlock(const Decl *D) { + return isFunctionOrMethod(D) || isa<BlockDecl>(D); +} /// Return true if the given decl has a declarator that should have /// been processed by Sema::GetTypeForDeclarator. @@ -257,7 +262,7 @@ static bool checkFunctionOrMethodParameterIndex(Sema &S, const Decl *D, unsigned AttrArgNum, const Expr *IdxExpr, uint64_t &Idx) { - assert(isFunctionOrMethod(D)); + assert(isFunctionOrMethodOrBlock(D)); // In C++ the implicit 'this' function parameter also counts. // Parameters are counted from one. @@ -351,13 +356,13 @@ static bool isIntOrBool(Expr *Exp) { // Check to see if the type is a smart pointer of some kind. We assume // it's a smart pointer if it defines both operator-> and operator*. 
static bool threadSafetyCheckIsSmartPointer(Sema &S, const RecordType* RT) { - DeclContextLookupConstResult Res1 = RT->getDecl()->lookup( - S.Context.DeclarationNames.getCXXOperatorName(OO_Star)); + DeclContextLookupResult Res1 = RT->getDecl()->lookup( + S.Context.DeclarationNames.getCXXOperatorName(OO_Star)); if (Res1.empty()) return false; - DeclContextLookupConstResult Res2 = RT->getDecl()->lookup( - S.Context.DeclarationNames.getCXXOperatorName(OO_Arrow)); + DeclContextLookupResult Res2 = RT->getDecl()->lookup( + S.Context.DeclarationNames.getCXXOperatorName(OO_Arrow)); if (Res2.empty()) return false; @@ -1492,6 +1497,20 @@ static void handleAliasAttr(Sema &S, Decl *D, const AttributeList &Attr) { return; } + // Aliases should be on declarations, not definitions. + if (const auto *FD = dyn_cast<FunctionDecl>(D)) { + if (FD->isThisDeclarationADefinition()) { + S.Diag(Attr.getLoc(), diag::err_alias_is_definition) << FD; + return; + } + } else { + const auto *VD = cast<VarDecl>(D); + if (VD->isThisDeclarationADefinition() && VD->isExternallyVisible()) { + S.Diag(Attr.getLoc(), diag::err_alias_is_definition) << VD; + return; + } + } + // FIXME: check if target symbol exists in current file D->addAttr(::new (S.Context) AliasAttr(Attr.getRange(), S.Context, Str, @@ -1534,18 +1553,16 @@ static void handleTLSModelAttr(Sema &S, Decl *D, Attr.getAttributeSpellingListIndex())); } -static void handleMallocAttr(Sema &S, Decl *D, const AttributeList &Attr) { - if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { - QualType RetTy = FD->getReturnType(); - if (RetTy->isAnyPointerType() || RetTy->isBlockPointerType()) { - D->addAttr(::new (S.Context) - MallocAttr(Attr.getRange(), S.Context, - Attr.getAttributeSpellingListIndex())); - return; - } +static void handleRestrictAttr(Sema &S, Decl *D, const AttributeList &Attr) { + QualType ResultType = getFunctionOrMethodResultType(D); + if (ResultType->isAnyPointerType() || ResultType->isBlockPointerType()) { + D->addAttr(::new (S.Context) RestrictAttr( + Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex())); + return; } - S.Diag(Attr.getLoc(), diag::warn_attribute_malloc_pointer_only); + S.Diag(Attr.getLoc(), diag::warn_attribute_return_pointers_only) + << Attr.getName() << getFunctionOrMethodResultSourceRange(D); } static void handleCommonAttr(Sema &S, Decl *D, const AttributeList &Attr) { @@ -1589,7 +1606,7 @@ static void handleAnalyzerNoReturnAttr(Sema &S, Decl *D, // The checking path for 'noreturn' and 'analyzer_noreturn' are different // because 'analyzer_noreturn' does not impact the type. 
- if (!isFunctionOrMethod(D) && !isa<BlockDecl>(D)) { + if (!isFunctionOrMethodOrBlock(D)) { ValueDecl *VD = dyn_cast<ValueDecl>(D); if (!VD || (!VD->getType()->isBlockPointerType() && !VD->getType()->isFunctionPointerType())) { @@ -2105,6 +2122,22 @@ static void handleObjCNSObject(Sema &S, Decl *D, const AttributeList &Attr) { Attr.getAttributeSpellingListIndex())); } +static void handleObjCIndependentClass(Sema &S, Decl *D, const AttributeList &Attr) { + if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(D)) { + QualType T = TD->getUnderlyingType(); + if (!T->isObjCObjectPointerType()) { + S.Diag(TD->getLocation(), diag::warn_ptr_independentclass_attribute); + return; + } + } else { + S.Diag(D->getLocation(), diag::warn_independentclass_attribute); + return; + } + D->addAttr(::new (S.Context) + ObjCIndependentClassAttr(Attr.getRange(), S.Context, + Attr.getAttributeSpellingListIndex())); +} + static void handleBlocksAttr(Sema &S, Decl *D, const AttributeList &Attr) { if (!Attr.isArgIdent(0)) { S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_type) @@ -2330,6 +2363,15 @@ SectionAttr *Sema::mergeSectionAttr(Decl *D, SourceRange Range, AttrSpellingListIndex); } +bool Sema::checkSectionName(SourceLocation LiteralLoc, StringRef SecName) { + std::string Error = Context.getTargetInfo().isValidSectionSpecifier(SecName); + if (!Error.empty()) { + Diag(LiteralLoc, diag::err_attribute_section_invalid_for_target) << Error; + return false; + } + return true; +} + static void handleSectionAttr(Sema &S, Decl *D, const AttributeList &Attr) { // Make sure that there is a string literal as the sections's single // argument. @@ -2338,6 +2380,9 @@ static void handleSectionAttr(Sema &S, Decl *D, const AttributeList &Attr) { if (!S.checkStringLiteralArgumentAttr(Attr, 0, Str, &LiteralLoc)) return; + if (!S.checkSectionName(LiteralLoc, Str)) + return; + // If the target wants to validate the section specifier, make it happen. std::string Error = S.Context.getTargetInfo().isValidSectionSpecifier(Str); if (!Error.empty()) { @@ -2482,6 +2527,7 @@ static FormatAttrKind getFormatAttrKind(StringRef Format) { .Cases("cmn_err", "vcmn_err", "zcmn_err", SupportedFormat) .Case("kprintf", SupportedFormat) // OpenBSD. .Case("freebsd_kprintf", SupportedFormat) // FreeBSD. 
+ .Case("os_trace", SupportedFormat) .Cases("gcc_diag", "gcc_cdiag", "gcc_cxxdiag", "gcc_tdiag", IgnoredFormat) .Default(InvalidFormat); @@ -2838,6 +2884,16 @@ static void handleAlignedAttr(Sema &S, Decl *D, const AttributeList &Attr) { if (!Attr.isPackExpansion() && S.DiagnoseUnexpandedParameterPack(E)) return; + if (E->isValueDependent()) { + if (const auto *TND = dyn_cast<TypedefNameDecl>(D)) { + if (!TND->getUnderlyingType()->isDependentType()) { + S.Diag(Attr.getLoc(), diag::err_alignment_dependent_typedef_name) + << E->getSourceRange(); + return; + } + } + } + S.AddAlignedAttr(Attr.getRange(), D, E, Attr.getAttributeSpellingListIndex(), Attr.isPackExpansion()); } @@ -2940,12 +2996,15 @@ void Sema::AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *TS, void Sema::CheckAlignasUnderalignment(Decl *D) { assert(D->hasAttrs() && "no attributes on decl"); - QualType Ty; - if (ValueDecl *VD = dyn_cast<ValueDecl>(D)) - Ty = VD->getType(); - else - Ty = Context.getTagDeclType(cast<TagDecl>(D)); - if (Ty->isDependentType() || Ty->isIncompleteType()) + QualType UnderlyingTy, DiagTy; + if (ValueDecl *VD = dyn_cast<ValueDecl>(D)) { + UnderlyingTy = DiagTy = VD->getType(); + } else { + UnderlyingTy = DiagTy = Context.getTagDeclType(cast<TagDecl>(D)); + if (EnumDecl *ED = dyn_cast<EnumDecl>(D)) + UnderlyingTy = ED->getIntegerType(); + } + if (DiagTy->isDependentType() || DiagTy->isIncompleteType()) return; // C++11 [dcl.align]p5, C11 6.7.5/4: @@ -2964,10 +3023,10 @@ void Sema::CheckAlignasUnderalignment(Decl *D) { if (AlignasAttr && Align) { CharUnits RequestedAlign = Context.toCharUnitsFromBits(Align); - CharUnits NaturalAlign = Context.getTypeAlignInChars(Ty); + CharUnits NaturalAlign = Context.getTypeAlignInChars(UnderlyingTy); if (NaturalAlign > RequestedAlign) Diag(AlignasAttr->getLocation(), diag::err_alignas_underaligned) - << Ty << (unsigned)NaturalAlign.getQuantity(); + << DiagTy << (unsigned)NaturalAlign.getQuantity(); } } @@ -3315,11 +3374,6 @@ static void handleCallConvAttr(Sema &S, Decl *D, const AttributeList &Attr) { Attr.getAttributeSpellingListIndex())); return; } - case AttributeList::AT_PnaclCall: - D->addAttr(::new (S.Context) - PnaclCallAttr(Attr.getRange(), S.Context, - Attr.getAttributeSpellingListIndex())); - return; case AttributeList::AT_IntelOclBicc: D->addAttr(::new (S.Context) IntelOclBiccAttr(Attr.getRange(), S.Context, @@ -3376,16 +3430,18 @@ bool Sema::CheckCallingConvAttr(const AttributeList &attr, CallingConv &CC, Diag(attr.getLoc(), diag::err_invalid_pcs); return true; } - case AttributeList::AT_PnaclCall: CC = CC_PnaclCall; break; case AttributeList::AT_IntelOclBicc: CC = CC_IntelOclBicc; break; default: llvm_unreachable("unexpected attribute kind"); } const TargetInfo &TI = Context.getTargetInfo(); TargetInfo::CallingConvCheckResult A = TI.checkCallingConvention(CC); - if (A == TargetInfo::CCCR_Warning) { - Diag(attr.getLoc(), diag::warn_cconv_ignored) << attr.getName(); + if (A != TargetInfo::CCCR_OK) { + if (A == TargetInfo::CCCR_Warning) + Diag(attr.getLoc(), diag::warn_cconv_ignored) << attr.getName(); + // This convention is not valid for the target. Use the default function or + // method calling convention. TargetInfo::CallingConvMethodType MT = TargetInfo::CCMT_Unknown; if (FD) MT = FD->isCXXInstanceMember() ? 
TargetInfo::CCMT_Member : @@ -3432,20 +3488,63 @@ bool Sema::CheckRegparmAttr(const AttributeList &Attr, unsigned &numParams) { return false; } +// Checks whether an argument of launch_bounds attribute is acceptable +// May output an error. +static bool checkLaunchBoundsArgument(Sema &S, Expr *E, + const CUDALaunchBoundsAttr &Attr, + const unsigned Idx) { + + if (S.DiagnoseUnexpandedParameterPack(E)) + return false; + + // Accept template arguments for now as they depend on something else. + // We'll get to check them when they eventually get instantiated. + if (E->isValueDependent()) + return true; + + llvm::APSInt I(64); + if (!E->isIntegerConstantExpr(I, S.Context)) { + S.Diag(E->getExprLoc(), diag::err_attribute_argument_n_type) + << &Attr << Idx << AANT_ArgumentIntegerConstant << E->getSourceRange(); + return false; + } + // Make sure we can fit it in 32 bits. + if (!I.isIntN(32)) { + S.Diag(E->getExprLoc(), diag::err_ice_too_large) << I.toString(10, false) + << 32 << /* Unsigned */ 1; + return false; + } + if (I < 0) + S.Diag(E->getExprLoc(), diag::warn_attribute_argument_n_negative) + << &Attr << Idx << E->getSourceRange(); + + return true; +} + +void Sema::AddLaunchBoundsAttr(SourceRange AttrRange, Decl *D, Expr *MaxThreads, + Expr *MinBlocks, unsigned SpellingListIndex) { + CUDALaunchBoundsAttr TmpAttr(AttrRange, Context, MaxThreads, MinBlocks, + SpellingListIndex); + + if (!checkLaunchBoundsArgument(*this, MaxThreads, TmpAttr, 0)) + return; + + if (MinBlocks && !checkLaunchBoundsArgument(*this, MinBlocks, TmpAttr, 1)) + return; + + D->addAttr(::new (Context) CUDALaunchBoundsAttr( + AttrRange, Context, MaxThreads, MinBlocks, SpellingListIndex)); +} + static void handleLaunchBoundsAttr(Sema &S, Decl *D, const AttributeList &Attr) { - uint32_t MaxThreads, MinBlocks = 0; - if (!checkUInt32Argument(S, Attr, Attr.getArgAsExpr(0), MaxThreads, 1)) - return; - if (Attr.getNumArgs() > 1 && !checkUInt32Argument(S, Attr, - Attr.getArgAsExpr(1), - MinBlocks, 2)) + if (!checkAttributeAtLeastNumArgs(S, Attr, 1) || + !checkAttributeAtMostNumArgs(S, Attr, 2)) return; - D->addAttr(::new (S.Context) - CUDALaunchBoundsAttr(Attr.getRange(), S.Context, - MaxThreads, MinBlocks, - Attr.getAttributeSpellingListIndex())); + S.AddLaunchBoundsAttr(Attr.getRange(), D, Attr.getArgAsExpr(0), + Attr.getNumArgs() > 1 ? Attr.getArgAsExpr(1) : nullptr, + Attr.getAttributeSpellingListIndex()); } static void handleArgumentWithTypeTagAttr(Sema &S, Decl *D, @@ -3724,6 +3823,22 @@ static void handleObjCBridgeAttr(Sema &S, Scope *Sc, Decl *D, S.Diag(D->getLocStart(), diag::err_objc_attr_not_id) << Attr.getName() << 0; return; } + + // Typedefs only allow objc_bridge(id) and have some additional checking. + if (auto TD = dyn_cast<TypedefNameDecl>(D)) { + if (!Parm->Ident->isStr("id")) { + S.Diag(Attr.getLoc(), diag::err_objc_attr_typedef_not_id) + << Attr.getName(); + return; + } + + // Only allow 'cv void *'. 
+ QualType T = TD->getUnderlyingType(); + if (!T->isVoidPointerType()) { + S.Diag(Attr.getLoc(), diag::err_objc_attr_typedef_not_void_pointer); + return; + } + } D->addAttr(::new (S.Context) ObjCBridgeAttr(Attr.getRange(), S.Context, Parm->Ident, @@ -3770,6 +3885,10 @@ static void handleObjCDesignatedInitializer(Sema &S, Decl *D, IFace = CatDecl->getClassInterface(); else IFace = cast<ObjCInterfaceDecl>(D->getDeclContext()); + + if (!IFace) + return; + IFace->setHasDesignatedInitializers(); D->addAttr(::new (S.Context) ObjCDesignatedInitializerAttr(Attr.getRange(), S.Context, @@ -4226,9 +4345,52 @@ static void handleDeprecatedAttr(Sema &S, Decl *D, const AttributeList &Attr) { return; } } + + if (!S.getLangOpts().CPlusPlus14) + if (Attr.isCXX11Attribute() && + !(Attr.hasScope() && Attr.getScopeName()->isStr("gnu"))) + S.Diag(Attr.getLoc(), diag::ext_deprecated_attr_is_a_cxx14_extension); + handleAttrWithMessage<DeprecatedAttr>(S, D, Attr); } +static void handleNoSanitizeAttr(Sema &S, Decl *D, const AttributeList &Attr) { + if (!checkAttributeAtLeastNumArgs(S, Attr, 1)) + return; + + std::vector<std::string> Sanitizers; + + for (unsigned I = 0, E = Attr.getNumArgs(); I != E; ++I) { + StringRef SanitizerName; + SourceLocation LiteralLoc; + + if (!S.checkStringLiteralArgumentAttr(Attr, I, SanitizerName, &LiteralLoc)) + return; + + if (parseSanitizerValue(SanitizerName, /*AllowGroups=*/true) == 0) + S.Diag(LiteralLoc, diag::warn_unknown_sanitizer_ignored) << SanitizerName; + + Sanitizers.push_back(SanitizerName); + } + + D->addAttr(::new (S.Context) NoSanitizeAttr( + Attr.getRange(), S.Context, Sanitizers.data(), Sanitizers.size(), + Attr.getAttributeSpellingListIndex())); +} + +static void handleNoSanitizeSpecificAttr(Sema &S, Decl *D, + const AttributeList &Attr) { + std::string SanitizerName = + llvm::StringSwitch<std::string>(Attr.getName()->getName()) + .Case("no_address_safety_analysis", "address") + .Case("no_sanitize_address", "address") + .Case("no_sanitize_thread", "thread") + .Case("no_sanitize_memory", "memory"); + D->addAttr(::new (S.Context) + NoSanitizeAttr(Attr.getRange(), S.Context, &SanitizerName, 1, + Attr.getAttributeSpellingListIndex())); +} + /// Handles semantic checking for features that are common to all attributes, /// such as checking whether a parameter was properly specified, or the correct /// number of arguments were passed, etc. 
@@ -4396,6 +4558,9 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D, case AttributeList::AT_OptimizeNone: handleOptimizeNoneAttr(S, D, Attr); break; + case AttributeList::AT_FlagEnum: + handleSimpleAttribute<FlagEnumAttr>(S, D, Attr); + break; case AttributeList::AT_Flatten: handleSimpleAttribute<FlattenAttr>(S, D, Attr); break; @@ -4420,8 +4585,8 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D, case AttributeList::AT_CUDALaunchBounds: handleLaunchBoundsAttr(S, D, Attr); break; - case AttributeList::AT_Malloc: - handleMallocAttr(S, D, Attr); + case AttributeList::AT_Restrict: + handleRestrictAttr(S, D, Attr); break; case AttributeList::AT_MayAlias: handleSimpleAttribute<MayAliasAttr>(S, D, Attr); @@ -4609,6 +4774,9 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D, case AttributeList::AT_ObjCNSObject: handleObjCNSObject(S, D, Attr); break; + case AttributeList::AT_ObjCIndependentClass: + handleObjCIndependentClass(S, D, Attr); + break; case AttributeList::AT_Blocks: handleBlocksAttr(S, D, Attr); break; @@ -4645,7 +4813,6 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D, case AttributeList::AT_MSABI: case AttributeList::AT_SysVABI: case AttributeList::AT_Pcs: - case AttributeList::AT_PnaclCall: case AttributeList::AT_IntelOclBicc: handleCallConvAttr(S, D, Attr); break; @@ -4657,8 +4824,11 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D, break; // Microsoft attributes: - case AttributeList::AT_MsStruct: - handleSimpleAttribute<MsStructAttr>(S, D, Attr); + case AttributeList::AT_MSNoVTable: + handleSimpleAttribute<MSNoVTableAttr>(S, D, Attr); + break; + case AttributeList::AT_MSStruct: + handleSimpleAttribute<MSStructAttr>(S, D, Attr); break; case AttributeList::AT_Uuid: handleUuidAttr(S, D, Attr); @@ -4689,18 +4859,15 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D, case AttributeList::AT_ScopedLockable: handleSimpleAttribute<ScopedLockableAttr>(S, D, Attr); break; - case AttributeList::AT_NoSanitizeAddress: - handleSimpleAttribute<NoSanitizeAddressAttr>(S, D, Attr); + case AttributeList::AT_NoSanitize: + handleNoSanitizeAttr(S, D, Attr); + break; + case AttributeList::AT_NoSanitizeSpecific: + handleNoSanitizeSpecificAttr(S, D, Attr); break; case AttributeList::AT_NoThreadSafetyAnalysis: handleSimpleAttribute<NoThreadSafetyAnalysisAttr>(S, D, Attr); break; - case AttributeList::AT_NoSanitizeThread: - handleSimpleAttribute<NoSanitizeThreadAttr>(S, D, Attr); - break; - case AttributeList::AT_NoSanitizeMemory: - handleSimpleAttribute<NoSanitizeMemoryAttr>(S, D, Attr); - break; case AttributeList::AT_GuardedBy: handleGuardedByAttr(S, D, Attr); break; @@ -4963,8 +5130,7 @@ void Sema::ProcessPragmaWeak(Scope *S, Decl *D) { ND = FD; if (ND) { if (IdentifierInfo *Id = ND->getIdentifier()) { - llvm::DenseMap<IdentifierInfo*,WeakInfo>::iterator I - = WeakUndeclaredIdentifiers.find(Id); + auto I = WeakUndeclaredIdentifiers.find(Id); if (I != WeakUndeclaredIdentifiers.end()) { WeakInfo W = I->second; DeclApplyPragmaWeak(S, ND, W); @@ -5043,7 +5209,8 @@ static bool isDeclDeprecated(Decl *D) { return true; // A category implicitly has the availability of the interface. 
if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(D)) - return CatD->getClassInterface()->isDeprecated(); + if (const ObjCInterfaceDecl *Interface = CatD->getClassInterface()) + return Interface->isDeprecated(); } while ((D = cast_or_null<Decl>(D->getDeclContext()))); return false; } @@ -5054,12 +5221,13 @@ static bool isDeclUnavailable(Decl *D) { return true; // A category implicitly has the availability of the interface. if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(D)) - return CatD->getClassInterface()->isUnavailable(); + if (const ObjCInterfaceDecl *Interface = CatD->getClassInterface()) + return Interface->isUnavailable(); } while ((D = cast_or_null<Decl>(D->getDeclContext()))); return false; } -static void DoEmitAvailabilityWarning(Sema &S, DelayedDiagnostic::DDKind K, +static void DoEmitAvailabilityWarning(Sema &S, Sema::AvailabilityDiagnostic K, Decl *Ctx, const NamedDecl *D, StringRef Message, SourceLocation Loc, const ObjCInterfaceDecl *UnknownObjCClass, @@ -5076,8 +5244,8 @@ static void DoEmitAvailabilityWarning(Sema &S, DelayedDiagnostic::DDKind K, // Don't warn if our current context is deprecated or unavailable. switch (K) { - case DelayedDiagnostic::Deprecation: - if (isDeclDeprecated(Ctx)) + case Sema::AD_Deprecation: + if (isDeclDeprecated(Ctx) || isDeclUnavailable(Ctx)) return; diag = !ObjCPropertyAccess ? diag::warn_deprecated : diag::warn_property_method_deprecated; @@ -5087,7 +5255,7 @@ static void DoEmitAvailabilityWarning(Sema &S, DelayedDiagnostic::DDKind K, available_here_select_kind = /* deprecated */ 2; break; - case DelayedDiagnostic::Unavailable: + case Sema::AD_Unavailable: if (isDeclUnavailable(Ctx)) return; diag = !ObjCPropertyAccess ? diag::err_unavailable @@ -5098,8 +5266,13 @@ static void DoEmitAvailabilityWarning(Sema &S, DelayedDiagnostic::DDKind K, available_here_select_kind = /* unavailable */ 0; break; - default: - llvm_unreachable("Neither a deprecation or unavailable kind"); + case Sema::AD_Partial: + diag = diag::warn_partial_availability; + diag_message = diag::warn_partial_message; + diag_fwdclass_message = diag::warn_partial_fwdclass_message; + property_note_select = /* partial */ 2; + available_here_select_kind = /* partial */ 3; + break; } if (!Message.empty()) { @@ -5119,15 +5292,21 @@ static void DoEmitAvailabilityWarning(Sema &S, DelayedDiagnostic::DDKind K, S.Diag(D->getLocation(), diag::note_availability_specified_here) << D << available_here_select_kind; + if (K == Sema::AD_Partial) + S.Diag(Loc, diag::note_partial_availability_silence) << D; } static void handleDelayedAvailabilityCheck(Sema &S, DelayedDiagnostic &DD, Decl *Ctx) { + assert(DD.Kind == DelayedDiagnostic::Deprecation || + DD.Kind == DelayedDiagnostic::Unavailable); + Sema::AvailabilityDiagnostic AD = DD.Kind == DelayedDiagnostic::Deprecation + ? Sema::AD_Deprecation + : Sema::AD_Unavailable; DD.Triggered = true; - DoEmitAvailabilityWarning(S, (DelayedDiagnostic::DDKind)DD.Kind, Ctx, - DD.getDeprecationDecl(), DD.getDeprecationMessage(), - DD.Loc, DD.getUnknownObjCClass(), - DD.getObjCProperty(), false); + DoEmitAvailabilityWarning( + S, AD, Ctx, DD.getDeprecationDecl(), DD.getDeprecationMessage(), DD.Loc, + DD.getUnknownObjCClass(), DD.getObjCProperty(), false); } void Sema::PopParsingDeclaration(ParsingDeclState state, Decl *decl) { @@ -5193,7 +5372,7 @@ void Sema::EmitAvailabilityWarning(AvailabilityDiagnostic AD, const ObjCPropertyDecl *ObjCProperty, bool ObjCPropertyAccess) { // Delay if we're currently parsing a declaration. 
- if (DelayedDiagnostics.shouldDelayDiagnostics()) { + if (DelayedDiagnostics.shouldDelayDiagnostics() && AD != AD_Partial) { DelayedDiagnostics.add(DelayedDiagnostic::makeAvailability( AD, Loc, D, UnknownObjCClass, ObjCProperty, Message, ObjCPropertyAccess)); @@ -5201,16 +5380,6 @@ void Sema::EmitAvailabilityWarning(AvailabilityDiagnostic AD, } Decl *Ctx = cast<Decl>(getCurLexicalContext()); - DelayedDiagnostic::DDKind K; - switch (AD) { - case AD_Deprecation: - K = DelayedDiagnostic::Deprecation; - break; - case AD_Unavailable: - K = DelayedDiagnostic::Unavailable; - break; - } - - DoEmitAvailabilityWarning(*this, K, Ctx, D, Message, Loc, - UnknownObjCClass, ObjCProperty, ObjCPropertyAccess); + DoEmitAvailabilityWarning(*this, AD, Ctx, D, Message, Loc, UnknownObjCClass, + ObjCProperty, ObjCPropertyAccess); } diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaDeclCXX.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaDeclCXX.cpp index 510738e..b1dfe0e 100644 --- a/contrib/llvm/tools/clang/lib/Sema/SemaDeclCXX.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/SemaDeclCXX.cpp @@ -316,8 +316,17 @@ Sema::ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc, if (DiagnoseUnexpandedParameterPack(DefaultArg, UPPC_DefaultArgument)) { Param->setInvalidDecl(); return; - } - + } + + // C++11 [dcl.fct.default]p3 + // A default argument expression [...] shall not be specified for a + // parameter pack. + if (Param->isParameterPack()) { + Diag(EqualLoc, diag::err_param_default_argument_on_parameter_pack) + << DefaultArg->getSourceRange(); + return; + } + // Check that the default argument is well-formed CheckDefaultArgumentVisitor DefaultArgChecker(DefaultArg, this); if (DefaultArgChecker.Visit(DefaultArg)) { @@ -429,6 +438,45 @@ bool Sema::MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S) { bool Invalid = false; + // The declaration context corresponding to the scope is the semantic + // parent, unless this is a local function declaration, in which case + // it is that surrounding function. + DeclContext *ScopeDC = New->isLocalExternDecl() + ? New->getLexicalDeclContext() + : New->getDeclContext(); + + // Find the previous declaration for the purpose of default arguments. + FunctionDecl *PrevForDefaultArgs = Old; + for (/**/; PrevForDefaultArgs; + // Don't bother looking back past the latest decl if this is a local + // extern declaration; nothing else could work. + PrevForDefaultArgs = New->isLocalExternDecl() + ? nullptr + : PrevForDefaultArgs->getPreviousDecl()) { + // Ignore hidden declarations. + if (!LookupResult::isVisible(*this, PrevForDefaultArgs)) + continue; + + if (S && !isDeclInScope(PrevForDefaultArgs, ScopeDC, S) && + !New->isCXXClassMember()) { + // Ignore default arguments of old decl if they are not in + // the same scope and this is not an out-of-line definition of + // a member function. + continue; + } + + if (PrevForDefaultArgs->isLocalExternDecl() != New->isLocalExternDecl()) { + // If only one of these is a local function declaration, then they are + // declared in different scopes, even though isDeclInScope may think + // they're in the same scope. (If both are local, the scope check is + // sufficent, and if neither is local, then they are in the same scope.) + continue; + } + + // We found our guy. 
+ break; + } + // C++ [dcl.fct.default]p4: // For non-template functions, default arguments can be added in // later declarations of a function in the same @@ -447,34 +495,17 @@ bool Sema::MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, // in a member function definition that appears outside of the class // definition are added to the set of default arguments provided by the // member function declaration in the class definition. - for (unsigned p = 0, NumParams = Old->getNumParams(); p < NumParams; ++p) { - ParmVarDecl *OldParam = Old->getParamDecl(p); + for (unsigned p = 0, NumParams = PrevForDefaultArgs + ? PrevForDefaultArgs->getNumParams() + : 0; + p < NumParams; ++p) { + ParmVarDecl *OldParam = PrevForDefaultArgs->getParamDecl(p); ParmVarDecl *NewParam = New->getParamDecl(p); - bool OldParamHasDfl = OldParam->hasDefaultArg(); + bool OldParamHasDfl = OldParam ? OldParam->hasDefaultArg() : false; bool NewParamHasDfl = NewParam->hasDefaultArg(); - // The declaration context corresponding to the scope is the semantic - // parent, unless this is a local function declaration, in which case - // it is that surrounding function. - DeclContext *ScopeDC = New->isLocalExternDecl() - ? New->getLexicalDeclContext() - : New->getDeclContext(); - if (S && !isDeclInScope(Old, ScopeDC, S) && - !New->getDeclContext()->isRecord()) - // Ignore default parameters of old decl if they are not in - // the same scope and this is not an out-of-line definition of - // a member function. - OldParamHasDfl = false; - if (New->isLocalExternDecl() != Old->isLocalExternDecl()) - // If only one of these is a local function declaration, then they are - // declared in different scopes, even though isDeclInScope may think - // they're in the same scope. (If both are local, the scope check is - // sufficent, and if neither is local, then they are in the same scope.) - OldParamHasDfl = false; - if (OldParamHasDfl && NewParamHasDfl) { - unsigned DiagDefaultParamID = diag::err_param_default_argument_redefinition; @@ -482,7 +513,7 @@ bool Sema::MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, // of template class. The new default parameter's value is ignored. Invalid = true; if (getLangOpts().MicrosoftExt) { - CXXMethodDecl* MD = dyn_cast<CXXMethodDecl>(New); + CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(New); if (MD && MD->getParent()->getDescribedClassTemplate()) { // Merge the old default argument into the new parameter. NewParam->setHasInheritedDefaultArg(); @@ -509,14 +540,12 @@ bool Sema::MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, // Look for the function declaration where the default argument was // actually written, which may be a declaration prior to Old. - for (FunctionDecl *Older = Old->getPreviousDecl(); - Older; Older = Older->getPreviousDecl()) { - if (!Older->getParamDecl(p)->hasDefaultArg()) - break; - + for (auto Older = PrevForDefaultArgs; + OldParam->hasInheritedDefaultArg(); /**/) { + Older = Older->getPreviousDecl(); OldParam = Older->getParamDecl(p); - } - + } + Diag(OldParam->getLocation(), diag::note_previous_definition) << OldParam->getDefaultArgRange(); } else if (OldParamHasDfl) { @@ -524,7 +553,9 @@ bool Sema::MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, // It's important to use getInit() here; getDefaultArg() // strips off any top-level ExprWithCleanups. 
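The PrevForDefaultArgs walk implements the rule that default arguments accumulate across redeclarations in one scope and may not be redefined; for example:

    void f(int a, int b, int c = 30); // #1
    void f(int a, int b = 20, int c); // OK: 'c' keeps its default from #1
    void f(int a = 10, int b, int c); // OK: 'b' and 'c' keep theirs
    void f(int a, int b, int c = 30); // error: redefinition of default argument

    void call() { f(); }              // as if f(10, 20, 30)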
NewParam->setHasInheritedDefaultArg(); - if (OldParam->hasUninstantiatedDefaultArg()) + if (OldParam->hasUnparsedDefaultArg()) + NewParam->setUnparsedDefaultArg(); + else if (OldParam->hasUninstantiatedDefaultArg()) NewParam->setUninstantiatedDefaultArg( OldParam->getUninstantiatedDefaultArg()); else @@ -535,8 +566,9 @@ bool Sema::MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Diag(NewParam->getLocation(), diag::err_param_default_argument_template_redecl) << NewParam->getDefaultArgRange(); - Diag(Old->getLocation(), diag::note_template_prev_declaration) - << false; + Diag(PrevForDefaultArgs->getLocation(), + diag::note_template_prev_declaration) + << false; } else if (New->getTemplateSpecializationKind() != TSK_ImplicitInstantiation && New->getTemplateSpecializationKind() != TSK_Undeclared) { @@ -607,7 +639,8 @@ bool Sema::MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, << New << New->isConstexpr(); Diag(Old->getLocation(), diag::note_previous_declaration); Invalid = true; - } else if (!Old->isInlined() && New->isInlined() && Old->isDefined(Def)) { + } else if (!Old->getMostRecentDecl()->isInlined() && New->isInlined() && + Old->isDefined(Def)) { // C++11 [dcl.fcn.spec]p4: // If the definition of a function appears in a translation unit before its // first declaration as inline, the program is ill-formed. @@ -689,16 +722,16 @@ void Sema::CheckCXXDefaultArguments(FunctionDecl *FD) { break; } - // C++ [dcl.fct.default]p4: - // In a given function declaration, all parameters - // subsequent to a parameter with a default argument shall - // have default arguments supplied in this or previous - // declarations. A default argument shall not be redefined - // by a later declaration (not even to the same value). + // C++11 [dcl.fct.default]p4: + // In a given function declaration, each parameter subsequent to a parameter + // with a default argument shall have a default argument supplied in this or + // a previous declaration or shall be a function parameter pack. A default + // argument shall not be redefined by a later declaration (not even to the + // same value). unsigned LastMissingDefaultArg = 0; for (; p < NumParams; ++p) { ParmVarDecl *Param = FD->getParamDecl(p); - if (!Param->hasDefaultArg()) { + if (!Param->hasDefaultArg() && !Param->isParameterPack()) { if (Param->isInvalidDecl()) /* We already complained about this parameter. */; else if (Param->getIdentifier()) @@ -795,7 +828,8 @@ bool Sema::CheckConstexprFunctionDecl(const FunctionDecl *NewFD) { // - it shall not be virtual; const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(NewFD); if (Method && Method->isVirtual()) { - Diag(NewFD->getLocation(), diag::err_constexpr_virtual); + Method = Method->getCanonicalDecl(); + Diag(Method->getLocation(), diag::err_constexpr_virtual); // If it's not obvious why this function is virtual, find an overridden // function which uses the 'virtual' keyword. @@ -1545,6 +1579,31 @@ Sema::ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange, return true; } +/// Use small set to collect indirect bases. As this is only used +/// locally, there's no need to abstract the small size parameter. +typedef llvm::SmallPtrSet<QualType, 4> IndirectBaseSet; + +/// \brief Recursively add the bases of Type. Don't add Type itself. +static void +NoteIndirectBases(ASTContext &Context, IndirectBaseSet &Set, + const QualType &Type) +{ + // Even though the incoming type is a base, it might not be + // a class -- it could be a template parm, for instance. 
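The reworded [dcl.fct.default]p4 comment and the isParameterPack() exemption encode a C++11 change: a trailing function parameter pack does not itself need a default argument. For instance:

    template <typename... Ts>
    void g(int n = 0, Ts... rest); // OK in C++11: trailing pack, no default needed

    void h(int n = 0, int m);      // error: 'm' lacks a default argument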
+ if (auto Rec = Type->getAs<RecordType>()) { + auto Decl = Rec->getAsCXXRecordDecl(); + + // Iterate over its bases. + for (const auto &BaseSpec : Decl->bases()) { + QualType Base = Context.getCanonicalType(BaseSpec.getType()) + .getUnqualifiedType(); + if (Set.insert(Base).second) + // If we've not already seen it, recurse. + NoteIndirectBases(Context, Set, Base); + } + } +} + /// \brief Performs the actual work of attaching the given base class /// specifiers to a C++ class. bool Sema::AttachBaseSpecifiers(CXXRecordDecl *Class, CXXBaseSpecifier **Bases, @@ -1558,6 +1617,10 @@ bool Sema::AttachBaseSpecifiers(CXXRecordDecl *Class, CXXBaseSpecifier **Bases, // class. std::map<QualType, CXXBaseSpecifier*, QualTypeOrdering> KnownBaseTypes; + // Used to track indirect bases so we can see if a direct base is + // ambiguous. + IndirectBaseSet IndirectBaseTypes; + // Copy non-redundant base specifiers into permanent storage. unsigned NumGoodBases = 0; bool Invalid = false; @@ -1585,6 +1648,11 @@ bool Sema::AttachBaseSpecifiers(CXXRecordDecl *Class, CXXBaseSpecifier **Bases, // Okay, add this new base class. KnownBase = Bases[idx]; Bases[NumGoodBases++] = Bases[idx]; + + // Note this base's direct & indirect bases, if there could be ambiguity. + if (NumBases > 1) + NoteIndirectBases(Context, IndirectBaseTypes, NewBaseType); + if (const RecordType *Record = NewBaseType->getAs<RecordType>()) { const CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl()); if (Class->isInterface() && @@ -1605,11 +1673,33 @@ bool Sema::AttachBaseSpecifiers(CXXRecordDecl *Class, CXXBaseSpecifier **Bases, // Attach the remaining base class specifiers to the derived class. Class->setBases(Bases, NumGoodBases); + + for (unsigned idx = 0; idx < NumGoodBases; ++idx) { + // Check whether this direct base is inaccessible due to ambiguity. + QualType BaseType = Bases[idx]->getType(); + CanQualType CanonicalBase = Context.getCanonicalType(BaseType) + .getUnqualifiedType(); + + if (IndirectBaseTypes.count(CanonicalBase)) { + CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true, + /*DetectVirtual=*/true); + bool found + = Class->isDerivedFrom(CanonicalBase->getAsCXXRecordDecl(), Paths); + assert(found); + (void)found; + + if (Paths.isAmbiguous(CanonicalBase)) + Diag(Bases[idx]->getLocStart (), diag::warn_inaccessible_base_class) + << BaseType << getAmbiguousPathsDisplayString(Paths) + << Bases[idx]->getSourceRange(); + else + assert(Bases[idx]->isVirtual()); + } - // Delete the remaining (good) base class specifiers, since their - // data has been copied into the CXXRecordDecl. - for (unsigned idx = 0; idx < NumGoodBases; ++idx) + // Delete the base class specifier, since its data has been copied + // into the CXXRecordDecl. Context.Deallocate(Bases[idx]); + } return Invalid; } @@ -1689,18 +1779,6 @@ void Sema::BuildBasePathArray(const CXXBasePaths &Paths, BasePathArray.push_back(const_cast<CXXBaseSpecifier*>(Path[I].Base)); } -/// \brief Determine whether the given base path includes a virtual -/// base class. 
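NoteIndirectBases and the IndirectBaseTypes check in AttachBaseSpecifiers warn when a direct base also occurs as an indirect base and is therefore unreachable by implicit conversion. A minimal reproducer:

    struct A {};
    struct B : A {};
    struct C : B, A {}; // warning: direct base 'A' is inaccessible due to
                        // ambiguity (C -> B -> A and C -> A)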
-bool Sema::BasePathInvolvesVirtualBase(const CXXCastPath &BasePath) { - for (CXXCastPath::const_iterator B = BasePath.begin(), - BEnd = BasePath.end(); - B != BEnd; ++B) - if ((*B)->isVirtual()) - return true; - - return false; -} - /// CheckDerivedToBaseConversion - Check whether the Derived-to-Base /// conversion (where Derived and Base are class types) is /// well-formed, meaning that the conversion is unambiguous (and @@ -2159,7 +2237,8 @@ Sema::ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, assert(Member && "HandleField never returns null"); } } else { - assert(InitStyle == ICIS_NoInit || D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_static); + assert(InitStyle == ICIS_NoInit || + D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_static); Member = HandleDeclarator(S, D, TemplateParameterLists); if (!Member) @@ -3537,8 +3616,9 @@ BuildImplicitMemberInitializer(Sema &SemaRef, CXXConstructorDecl *Constructor, InitializationKind::CreateDirect(Loc, SourceLocation(), SourceLocation()); Expr *CtorArgE = CtorArg.getAs<Expr>(); - InitializationSequence InitSeq(SemaRef, Entities.back(), InitKind, CtorArgE); - + InitializationSequence InitSeq(SemaRef, Entities.back(), InitKind, + CtorArgE); + ExprResult MemberInit = InitSeq.Perform(SemaRef, Entities.back(), InitKind, MultiExprArg(&CtorArgE, 1)); @@ -4659,15 +4739,15 @@ static void CheckAbstractClassUsage(AbstractUsageInfo &Info, } /// \brief Check class-level dllimport/dllexport attribute. -static void checkDLLAttribute(Sema &S, CXXRecordDecl *Class) { +void Sema::checkClassLevelDLLAttribute(CXXRecordDecl *Class) { Attr *ClassAttr = getDLLAttr(Class); // MSVC inherits DLL attributes to partial class template specializations. - if (S.Context.getTargetInfo().getCXXABI().isMicrosoft() && !ClassAttr) { + if (Context.getTargetInfo().getCXXABI().isMicrosoft() && !ClassAttr) { if (auto *Spec = dyn_cast<ClassTemplatePartialSpecializationDecl>(Class)) { if (Attr *TemplateAttr = getDLLAttr(Spec->getSpecializedTemplate()->getTemplatedDecl())) { - auto *A = cast<InheritableAttr>(TemplateAttr->clone(S.getASTContext())); + auto *A = cast<InheritableAttr>(TemplateAttr->clone(getASTContext())); A->setInherited(true); ClassAttr = A; } @@ -4678,12 +4758,12 @@ static void checkDLLAttribute(Sema &S, CXXRecordDecl *Class) { return; if (!Class->isExternallyVisible()) { - S.Diag(Class->getLocation(), diag::err_attribute_dll_not_extern) + Diag(Class->getLocation(), diag::err_attribute_dll_not_extern) << Class << ClassAttr; return; } - if (S.Context.getTargetInfo().getCXXABI().isMicrosoft() && + if (Context.getTargetInfo().getCXXABI().isMicrosoft() && !ClassAttr->isInherited()) { // Diagnose dll attributes on members of class with dll attribute. 
for (Decl *Member : Class->decls()) { @@ -4693,10 +4773,10 @@ static void checkDLLAttribute(Sema &S, CXXRecordDecl *Class) { if (!MemberAttr || MemberAttr->isInherited() || Member->isInvalidDecl()) continue; - S.Diag(MemberAttr->getLocation(), + Diag(MemberAttr->getLocation(), diag::err_attribute_dll_member_of_dll_class) << MemberAttr << ClassAttr; - S.Diag(ClassAttr->getLocation(), diag::note_previous_attribute); + Diag(ClassAttr->getLocation(), diag::note_previous_attribute); Member->setInvalidDecl(); } } @@ -4709,15 +4789,20 @@ static void checkDLLAttribute(Sema &S, CXXRecordDecl *Class) { const bool ClassExported = ClassAttr->getKind() == attr::DLLExport; const bool ClassImported = !ClassExported; + TemplateSpecializationKind TSK = Class->getTemplateSpecializationKind(); + + // Don't dllexport explicit class template instantiation declarations. + if (ClassExported && TSK == TSK_ExplicitInstantiationDeclaration) { + Class->dropAttr<DLLExportAttr>(); + return; + } + // Force declaration of implicit members so they can inherit the attribute. - S.ForceDeclarationOfImplicitMembers(Class); + ForceDeclarationOfImplicitMembers(Class); // FIXME: MSVC's docs say all bases must be exportable, but this doesn't // seem to be true in practice? - TemplateSpecializationKind TSK = - Class->getTemplateSpecializationKind(); - for (Decl *Member : Class->decls()) { VarDecl *VD = dyn_cast<VarDecl>(Member); CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Member); @@ -4731,22 +4816,25 @@ static void checkDLLAttribute(Sema &S, CXXRecordDecl *Class) { if (MD->isDeleted()) continue; - if (MD->isMoveAssignmentOperator() && ClassImported && MD->isInlined()) { - // Current MSVC versions don't export the move assignment operators, so - // don't attempt to import them if we have a definition. - continue; - } + if (MD->isInlined()) { + // MinGW does not import or export inline methods. + if (!Context.getTargetInfo().getCXXABI().isMicrosoft()) + continue; - if (MD->isInlined() && ClassImported && - !S.Context.getTargetInfo().getCXXABI().isMicrosoft()) { - // MinGW does not import inline functions. - continue; + // MSVC versions before 2015 don't export the move assignment operators, + // so don't attempt to import them if we have a definition. + if (ClassImported && MD->isMoveAssignmentOperator() && + !getLangOpts().isCompatibleWithMSVC(LangOptions::MSVC2015)) + continue; } } + if (!cast<NamedDecl>(Member)->isExternallyVisible()) + continue; + if (!getDLLAttr(Member)) { auto *NewAttr = - cast<InheritableAttr>(ClassAttr->clone(S.getASTContext())); + cast<InheritableAttr>(ClassAttr->clone(getASTContext())); NewAttr->setInherited(true); Member->addAttr(NewAttr); } @@ -4761,7 +4849,7 @@ static void checkDLLAttribute(Sema &S, CXXRecordDecl *Class) { if (TSK == TSK_ImplicitInstantiation && !ClassAttr->isInherited()) continue; - S.MarkFunctionReferenced(Class->getLocation(), MD); + MarkFunctionReferenced(Class->getLocation(), MD); // The function will be passed to the consumer when its definition is // encountered. @@ -4772,11 +4860,17 @@ static void checkDLLAttribute(Sema &S, CXXRecordDecl *Class) { // defaulted methods, and the copy and move assignment operators. The // latter are exported even if they are trivial, because the address of // an operator can be taken and should compare equal accross libraries. 
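One effect of the restructured checkClassLevelDLLAttribute: an explicit instantiation declaration of a dllexported class template drops the export attribute rather than force-emitting members. A sketch, assuming the MSVC ABI and -fms-extensions (names illustrative):

    template <typename T>
    struct __declspec(dllexport) Holder { T t; };

    extern template struct Holder<int>; // explicit instantiation declaration:
                                        // the dllexport attribute is dropped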
- S.MarkFunctionReferenced(Class->getLocation(), MD); + DiagnosticErrorTrap Trap(Diags); + MarkFunctionReferenced(Class->getLocation(), MD); + if (Trap.hasErrorOccurred()) { + Diag(ClassAttr->getLocation(), diag::note_due_to_dllexported_class) + << Class->getName() << !getLangOpts().CPlusPlus11; + break; + } // There is no later point when we will see the definition of this // function, so pass it to the consumer now. - S.Consumer.HandleTopLevelDecl(DeclGroupRef(MD)); + Consumer.HandleTopLevelDecl(DeclGroupRef(MD)); } } } @@ -4820,9 +4914,6 @@ void Sema::CheckCompletedCXXClass(CXXRecordDecl *Record) { } } - if (Record->isDynamicClass() && !Record->isDependentType()) - DynamicClasses.push_back(Record); - if (Record->getIdentifier()) { // C++ [class.mem]p13: // If T is the name of a class, then each of the following shall have a @@ -4923,7 +5014,7 @@ void Sema::CheckCompletedCXXClass(CXXRecordDecl *Record) { // have inheriting constructors. DeclareInheritingConstructors(Record); - checkDLLAttribute(*this, Record); + checkClassLevelDLLAttribute(Record); } /// Look up the special member function that would be called by a special @@ -7318,7 +7409,7 @@ bool Sema::isStdInitializerList(QualType Ty, QualType *Element) { StdInitializerList = Template; } - if (Template != StdInitializerList) + if (Template->getCanonicalDecl() != StdInitializerList->getCanonicalDecl()) return false; // This is an instance of std::initializer_list. Find the argument type. @@ -8029,15 +8120,7 @@ NamedDecl *Sema::BuildUsingDeclaration(Scope *S, AccessSpecifier AS, if (RequireCompleteDeclContext(SS, LookupContext)) return BuildInvalid(); - // The normal rules do not apply to inheriting constructor declarations. - if (NameInfo.getName().getNameKind() == DeclarationName::CXXConstructorName) { - UsingDecl *UD = BuildValid(); - CheckInheritingConstructorUsingDecl(UD); - return UD; - } - - // Otherwise, look up the target name. - + // Look up the target name. LookupResult R(*this, NameInfo, LookupOrdinaryName); // Unlike most lookups, we don't always want to hide tag @@ -8056,8 +8139,12 @@ NamedDecl *Sema::BuildUsingDeclaration(Scope *S, AccessSpecifier AS, LookupQualifiedName(R, LookupContext); - // Try to correct typos if possible. - if (R.empty()) { + // Try to correct typos if possible. If constructor name lookup finds no + // results, that means the named class has no explicit constructors, and we + // suppressed declaring implicit ones (probably because it's dependent or + // invalid). + if (R.empty() && + NameInfo.getName().getNameKind() != DeclarationName::CXXConstructorName) { if (TypoCorrection Corrected = CorrectTypo( R.getLookupNameInfo(), R.getLookupKind(), S, &SS, llvm::make_unique<UsingValidatorCCC>( @@ -8087,16 +8174,12 @@ NamedDecl *Sema::BuildUsingDeclaration(Scope *S, AccessSpecifier AS, NameInfo.setName(Context.DeclarationNames.getCXXConstructorName( Context.getCanonicalType(Context.getRecordType(RD)))); NameInfo.setNamedTypeInfo(nullptr); - - // Build it and process it as an inheriting constructor. - UsingDecl *UD = BuildValid(); - CheckInheritingConstructorUsingDecl(UD); - return UD; + for (auto *Ctor : LookupConstructors(RD)) + R.addDecl(Ctor); + } else { + // FIXME: Pick up all the declarations if we found an overloaded function. + R.addDecl(ND); } - - // FIXME: Pick up all the declarations if we found an overloaded function. 
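The BuildUsingDeclaration rework routes inheriting-constructor using-declarations through the ordinary lookup path, populating the lookup result with the base class constructors. For reference, the construct involved:

    struct Base {
      Base(int);
      Base(int, int);
    };

    struct Derived : Base {
      using Base::Base; // inheriting constructors
    };

    Derived d1(1), d2(1, 2); // both inherited constructors are usable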
- R.setLookupName(Corrected.getCorrection()); - R.addDecl(ND); } else { Diag(IdentLoc, diag::err_no_member) << NameInfo.getName() << LookupContext << SS.getRange(); @@ -8136,6 +8219,18 @@ NamedDecl *Sema::BuildUsingDeclaration(Scope *S, AccessSpecifier AS, } UsingDecl *UD = BuildValid(); + + // The normal rules do not apply to inheriting constructor declarations. + if (NameInfo.getName().getNameKind() == DeclarationName::CXXConstructorName) { + // Suppress access diagnostics; the access check is instead performed at the + // point of use for an inheriting constructor. + R.suppressDiagnostics(); + CheckInheritingConstructorUsingDecl(UD); + return UD; + } + + // Otherwise, look up the target name. + for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) { UsingShadowDecl *PrevDecl = nullptr; if (!CheckUsingShadowDecl(UD, *I, Previous, PrevDecl)) @@ -8421,7 +8516,8 @@ Decl *Sema::ActOnAliasDeclaration(Scope *S, SourceLocation UsingLoc, UnqualifiedId &Name, AttributeList *AttrList, - TypeResult Type) { + TypeResult Type, + Decl *DeclFromDeclSpec) { // Skip up to the relevant declaration scope. while (S->getFlags() & Scope::TemplateParamScope) S = S->getParent(); @@ -8549,6 +8645,10 @@ Decl *Sema::ActOnAliasDeclaration(Scope *S, NewND = NewDecl; } else { + if (auto *TD = dyn_cast_or_null<TagDecl>(DeclFromDeclSpec)) { + setTagNameForLinkagePurposes(TD, NewTD); + handleTagNumbering(TD, S); + } ActOnTypedefNameDecl(S, CurContext, NewTD, Previous, Redeclaration); NewND = NewTD; } @@ -9014,7 +9114,7 @@ private: ASTContext &Context = SemaRef.Context; DeclarationName Name = Context.DeclarationNames.getCXXConstructorName( Context.getCanonicalType(Context.getRecordType(Base))); - DeclContext::lookup_const_result Decls = Derived->lookup(Name); + DeclContext::lookup_result Decls = Derived->lookup(Name); return Decls.empty() ? Derived->getLocation() : Decls[0]->getLocation(); } @@ -9363,6 +9463,44 @@ void Sema::ActOnFinishCXXMemberDecls() { } } +static void getDefaultArgExprsForConstructors(Sema &S, CXXRecordDecl *Class) { + // Don't do anything for template patterns. + if (Class->getDescribedClassTemplate()) + return; + + for (Decl *Member : Class->decls()) { + auto *CD = dyn_cast<CXXConstructorDecl>(Member); + if (!CD) { + // Recurse on nested classes. + if (auto *NestedRD = dyn_cast<CXXRecordDecl>(Member)) + getDefaultArgExprsForConstructors(S, NestedRD); + continue; + } else if (!CD->isDefaultConstructor() || !CD->hasAttr<DLLExportAttr>()) { + continue; + } + + for (unsigned I = 0, E = CD->getNumParams(); I != E; ++I) { + // Skip any default arguments that we've already instantiated. + if (S.Context.getDefaultArgExprForConstructor(CD, I)) + continue; + + Expr *DefaultArg = S.BuildCXXDefaultArgExpr(Class->getLocation(), CD, + CD->getParamDecl(I)).get(); + S.Context.addDefaultArgExprForConstructor(CD, I, DefaultArg); + } + } +} + +void Sema::ActOnFinishCXXMemberDefaultArgs(Decl *D) { + auto *RD = dyn_cast<CXXRecordDecl>(D); + + // Default constructors that are annotated with __declspec(dllexport) which + // have default arguments or don't use the standard calling convention are + // wrapped with a thunk called the default constructor closure. + if (RD && Context.getTargetInfo().getCXXABI().isMicrosoft()) + getDefaultArgExprsForConstructors(*this, RD); +} + void Sema::AdjustDestructorExceptionSpec(CXXRecordDecl *ClassDecl, CXXDestructorDecl *Destructor) { assert(getLangOpts().CPlusPlus11 && @@ -9397,8 +9535,8 @@ namespace { // copy/move operators. 
These classes serve as factory functions and help us // avoid using the same Expr* in the AST twice. class ExprBuilder { - ExprBuilder(const ExprBuilder&) LLVM_DELETED_FUNCTION; - ExprBuilder &operator=(const ExprBuilder&) LLVM_DELETED_FUNCTION; + ExprBuilder(const ExprBuilder&) = delete; + ExprBuilder &operator=(const ExprBuilder&) = delete; protected: static Expr *assertNotNull(Expr *E) { @@ -10101,7 +10239,9 @@ void Sema::DefineImplicitCopyAssignment(SourceLocation CurrentLocation, // Assign non-static members. for (auto *Field : ClassDecl->fields()) { - if (Field->isUnnamedBitfield()) + // FIXME: We should form some kind of AST representation for the implied + // memcpy in a union copy operation. + if (Field->isUnnamedBitfield() || Field->getParent()->isUnion()) continue; if (Field->isInvalidDecl()) { @@ -10531,7 +10671,9 @@ void Sema::DefineImplicitMoveAssignment(SourceLocation CurrentLocation, // Assign non-static members. for (auto *Field : ClassDecl->fields()) { - if (Field->isUnnamedBitfield()) + // FIXME: We should form some kind of AST representation for the implied + // memcpy in a union copy operation. + if (Field->isUnnamedBitfield() || Field->getParent()->isUnion()) continue; if (Field->isInvalidDecl()) { @@ -11881,7 +12023,7 @@ VarDecl *Sema::BuildExceptionDeclaration(Scope *S, // // We just pretend to initialize the object with itself, then make sure // it can be destroyed later. - QualType initType = ExDeclType; + QualType initType = Context.getExceptionObjectType(ExDeclType); InitializedEntity entity = InitializedEntity::InitializeVariable(ExDecl); @@ -13122,7 +13264,8 @@ bool Sema::DefineUsedVTables() { DefinedAnything = true; MarkVirtualMembersReferenced(Loc, Class); CXXRecordDecl *Canonical = cast<CXXRecordDecl>(Class->getCanonicalDecl()); - Consumer.HandleVTable(Class, VTablesUsed[Canonical]); + if (VTablesUsed[Canonical]) + Consumer.HandleVTable(Class); // Optionally warn if we're emitting a weak vtable. if (Class->isExternallyVisible() && diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaDeclObjC.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaDeclObjC.cpp index 7e3da94..3831879 100644 --- a/contrib/llvm/tools/clang/lib/Sema/SemaDeclObjC.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/SemaDeclObjC.cpp @@ -448,6 +448,19 @@ class ObjCInterfaceValidatorCCC : public CorrectionCandidateCallback { } +static void diagnoseUseOfProtocols(Sema &TheSema, + ObjCContainerDecl *CD, + ObjCProtocolDecl *const *ProtoRefs, + unsigned NumProtoRefs, + const SourceLocation *ProtoLocs) { + assert(ProtoRefs); + // Diagnose availability in the context of the ObjC container. + Sema::ContextRAII SavedContext(TheSema, CD); + for (unsigned i = 0; i < NumProtoRefs; ++i) { + (void)TheSema.DiagnoseUseOfDecl(ProtoRefs[i], ProtoLocs[i]); + } +} + Decl *Sema:: ActOnStartClassInterface(SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, @@ -535,6 +548,8 @@ ActOnStartClassInterface(SourceLocation AtInterfaceLoc, ObjCInterfaceDecl *SuperClassDecl = dyn_cast_or_null<ObjCInterfaceDecl>(PrevDecl); + // Diagnose availability in the context of the @interface. + ContextRAII SavedContext(*this, IDecl); // Diagnose classes that inherit from deprecated classes. if (SuperClassDecl) (void)DiagnoseUseOfDecl(SuperClassDecl, SuperLoc); @@ -591,6 +606,8 @@ ActOnStartClassInterface(SourceLocation AtInterfaceLoc, // Check then save referenced protocols. 
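Per the FIXME added in DefineImplicitCopyAssignment/DefineImplicitMoveAssignment, fields of a union are no longer assigned one by one; the union is copied as a unit (an implied memcpy with no AST representation yet). Sketch:

    union U {
      int i;
      float f;
    };

    void assign(U &lhs, const U &rhs) {
      lhs = rhs; // implicit operator= copies U as a single block,
                 // not member by member
    }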
if (NumProtoRefs) { + diagnoseUseOfProtocols(*this, IDecl, (ObjCProtocolDecl*const*)ProtoRefs, + NumProtoRefs, ProtoLocs); IDecl->setProtocolList((ObjCProtocolDecl*const*)ProtoRefs, NumProtoRefs, ProtoLocs, Context); IDecl->setEndOfDefinitionLoc(EndProtoLoc); @@ -617,8 +634,7 @@ void Sema::ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs, QualType T = TDecl->getUnderlyingType(); if (T->isObjCObjectType()) if (const ObjCObjectType *OPT = T->getAs<ObjCObjectType>()) - for (auto *I : OPT->quals()) - ProtocolRefs.push_back(I); + ProtocolRefs.append(OPT->qual_begin(), OPT->qual_end()); } } @@ -752,6 +768,8 @@ Sema::ActOnStartProtocolInterface(SourceLocation AtProtoInterfaceLoc, if (!err && NumProtoRefs ) { /// Check then save referenced protocols. + diagnoseUseOfProtocols(*this, PDecl, (ObjCProtocolDecl*const*)ProtoRefs, + NumProtoRefs, ProtoLocs); PDecl->setProtocolList((ObjCProtocolDecl*const*)ProtoRefs, NumProtoRefs, ProtoLocs, Context); } @@ -779,7 +797,7 @@ static bool NestedProtocolHasNoDefinition(ObjCProtocolDecl *PDecl, /// issues an error if they are not declared. It returns list of /// protocol declarations in its 'Protocols' argument. void -Sema::FindProtocolDeclaration(bool WarnOnDeclarations, +Sema::FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer, const IdentifierLocPair *ProtocolId, unsigned NumProtocols, SmallVectorImpl<Decl *> &Protocols) { @@ -805,8 +823,12 @@ Sema::FindProtocolDeclaration(bool WarnOnDeclarations, // If this is a forward protocol declaration, get its definition. if (!PDecl->isThisDeclarationADefinition() && PDecl->getDefinition()) PDecl = PDecl->getDefinition(); - - (void)DiagnoseUseOfDecl(PDecl, ProtocolId[i].second); + + // For an objc container, delay protocol reference checking until after we + // can set the objc decl as the availability context, otherwise check now. + if (!ForObjCContainer) { + (void)DiagnoseUseOfDecl(PDecl, ProtocolId[i].second); + } // If this is a forward declaration and we are supposed to warn in this // case, do it. @@ -935,7 +957,9 @@ ActOnStartCategoryInterface(SourceLocation AtInterfaceLoc, CurContext->addDecl(CDecl); if (NumProtoRefs) { - CDecl->setProtocolList((ObjCProtocolDecl*const*)ProtoRefs, NumProtoRefs, + diagnoseUseOfProtocols(*this, CDecl, (ObjCProtocolDecl*const*)ProtoRefs, + NumProtoRefs, ProtoLocs); + CDecl->setProtocolList((ObjCProtocolDecl*const*)ProtoRefs, NumProtoRefs, ProtoLocs, Context); // Protocols in the class extension belong to the class. if (CDecl->IsClassExtension()) @@ -2241,8 +2265,14 @@ void Sema::addMethodToGlobalList(ObjCMethodList *List, if (getLangOpts().Modules && !getLangOpts().CurrentModule.empty()) continue; - if (!MatchTwoMethodDeclarations(Method, List->getMethod())) + if (!MatchTwoMethodDeclarations(Method, List->getMethod())) { + // Even if two method types do not match, we would like to say + // there is more than one declaration so unavailability/deprecated + // warning is not too noisy. + if (!Method->isDefined()) + List->setHasMoreThanOneDecl(true); continue; + } ObjCMethodDecl *PrevObjCMethod = List->getMethod(); @@ -2341,19 +2371,33 @@ bool Sema::CollectMultipleMethodsInGlobalPool( return Methods.size() > 1; } -bool Sema::AreMultipleMethodsInGlobalPool(Selector Sel, bool instance) { +bool Sema::AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod, + SourceRange R, + bool receiverIdOrClass) { GlobalMethodPool::iterator Pos = MethodPool.find(Sel); // Test for no method in the pool which should not trigger any warning by // caller. 
if (Pos == MethodPool.end()) return true; - ObjCMethodList &MethList = instance ? Pos->second.first : Pos->second.second; + ObjCMethodList &MethList = + BestMethod->isInstanceMethod() ? Pos->second.first : Pos->second.second; + + // Diagnose finding more than one method in global pool + SmallVector<ObjCMethodDecl *, 4> Methods; + Methods.push_back(BestMethod); + for (ObjCMethodList *ML = &MethList; ML; ML = ML->getNext()) + if (ObjCMethodDecl *M = ML->getMethod()) + if (!M->isHidden() && M != BestMethod && !M->hasAttr<UnavailableAttr>()) + Methods.push_back(M); + if (Methods.size() > 1) + DiagnoseMultipleMethodInGlobalPool(Methods, Sel, R, receiverIdOrClass); + return MethList.hasMoreThanOneDecl(); } ObjCMethodDecl *Sema::LookupMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass, - bool warn, bool instance) { + bool instance) { if (ExternalSource) ReadMethodPool(Sel); @@ -2365,31 +2409,23 @@ ObjCMethodDecl *Sema::LookupMethodInGlobalPool(Selector Sel, SourceRange R, ObjCMethodList &MethList = instance ? Pos->second.first : Pos->second.second; SmallVector<ObjCMethodDecl *, 4> Methods; for (ObjCMethodList *M = &MethList; M; M = M->getNext()) { - if (M->getMethod() && !M->getMethod()->isHidden()) { - // If we're not supposed to warn about mismatches, we're done. - if (!warn) - return M->getMethod(); - - Methods.push_back(M->getMethod()); - } + if (M->getMethod() && !M->getMethod()->isHidden()) + return M->getMethod(); } + return nullptr; +} - // If there aren't any visible methods, we're done. - // FIXME: Recover if there are any known-but-hidden methods? - if (Methods.empty()) - return nullptr; - - if (Methods.size() == 1) - return Methods[0]; - +void Sema::DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods, + Selector Sel, SourceRange R, + bool receiverIdOrClass) { // We found multiple methods, so we may have to complain. bool issueDiagnostic = false, issueError = false; // We support a warning which complains about *any* difference in // method signature. bool strictSelectorMatch = - receiverIdOrClass && warn && - !Diags.isIgnored(diag::warn_strict_multiple_method_decl, R.getBegin()); + receiverIdOrClass && + !Diags.isIgnored(diag::warn_strict_multiple_method_decl, R.getBegin()); if (strictSelectorMatch) { for (unsigned I = 1, N = Methods.size(); I != N; ++I) { if (!MatchTwoMethodDeclarations(Methods[0], Methods[I], MMS_strict)) { @@ -2414,7 +2450,7 @@ ObjCMethodDecl *Sema::LookupMethodInGlobalPool(Selector Sel, SourceRange R, break; } } - + if (issueDiagnostic) { if (issueError) Diag(R.getBegin(), diag::err_arc_multiple_method_decl) << Sel << R; @@ -2422,16 +2458,15 @@ ObjCMethodDecl *Sema::LookupMethodInGlobalPool(Selector Sel, SourceRange R, Diag(R.getBegin(), diag::warn_strict_multiple_method_decl) << Sel << R; else Diag(R.getBegin(), diag::warn_multiple_method_decl) << Sel << R; - + Diag(Methods[0]->getLocStart(), issueError ? 
diag::note_possibility : diag::note_using) - << Methods[0]->getSourceRange(); + << Methods[0]->getSourceRange(); for (unsigned I = 1, N = Methods.size(); I != N; ++I) { Diag(Methods[I]->getLocStart(), diag::note_also_found) - << Methods[I]->getSourceRange(); - } + << Methods[I]->getSourceRange(); + } } - return Methods[0]; } ObjCMethodDecl *Sema::LookupImplementedMethodInGlobalPool(Selector Sel) { @@ -2442,12 +2477,16 @@ ObjCMethodDecl *Sema::LookupImplementedMethodInGlobalPool(Selector Sel) { GlobalMethods &Methods = Pos->second; for (const ObjCMethodList *Method = &Methods.first; Method; Method = Method->getNext()) - if (Method->getMethod() && Method->getMethod()->isDefined()) + if (Method->getMethod() && + (Method->getMethod()->isDefined() || + Method->getMethod()->isPropertyAccessor())) return Method->getMethod(); for (const ObjCMethodList *Method = &Methods.second; Method; Method = Method->getNext()) - if (Method->getMethod() && Method->getMethod()->isDefined()) + if (Method->getMethod() && + (Method->getMethod()->isDefined() || + Method->getMethod()->isPropertyAccessor())) return Method->getMethod(); return nullptr; } @@ -2571,10 +2610,9 @@ Sema::ObjCContainerKind Sema::getObjCContainerKind() const { case Decl::ObjCProtocol: return Sema::OCK_Protocol; case Decl::ObjCCategory: - if (dyn_cast<ObjCCategoryDecl>(CurContext)->IsClassExtension()) + if (cast<ObjCCategoryDecl>(CurContext)->IsClassExtension()) return Sema::OCK_ClassExtension; - else - return Sema::OCK_Category; + return Sema::OCK_Category; case Decl::ObjCImplementation: return Sema::OCK_Implementation; case Decl::ObjCCategoryImpl: @@ -3286,7 +3324,7 @@ Decl *Sema::ActOnMethodDeclaration( case OMF_alloc: case OMF_new: - InferRelatedResultType = ObjCMethod->isClassMethod(); + InferRelatedResultType = ObjCMethod->isClassMethod(); break; case OMF_init: @@ -3297,7 +3335,8 @@ Decl *Sema::ActOnMethodDeclaration( break; } - if (InferRelatedResultType) + if (InferRelatedResultType && + !ObjCMethod->getReturnType()->isObjCIndependentClassType()) ObjCMethod->SetRelatedResultType(); } @@ -3487,12 +3526,11 @@ void Sema::DiagnoseUseOfUnimplementedSelectors() { if (ReferencedSelectors.empty() || !Context.AnyObjCImplementation()) return; - for (llvm::DenseMap<Selector, SourceLocation>::iterator S = - ReferencedSelectors.begin(), - E = ReferencedSelectors.end(); S != E; ++S) { - Selector Sel = (*S).first; + for (auto &SelectorAndLocation : ReferencedSelectors) { + Selector Sel = SelectorAndLocation.first; + SourceLocation Loc = SelectorAndLocation.second; if (!LookupImplementedMethodInGlobalPool(Sel)) - Diag((*S).second, diag::warn_unimplemented_selector) << Sel; + Diag(Loc, diag::warn_unimplemented_selector) << Sel; } return; } diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaExceptionSpec.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaExceptionSpec.cpp index 2387325..51d6ace 100644 --- a/contrib/llvm/tools/clang/lib/Sema/SemaExceptionSpec.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/SemaExceptionSpec.cpp @@ -167,13 +167,13 @@ Sema::ResolveExceptionSpec(SourceLocation Loc, const FunctionProtoType *FPT) { void Sema::UpdateExceptionSpec(FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI) { - for (auto *Redecl : FD->redecls()) - Context.adjustExceptionSpec(cast<FunctionDecl>(Redecl), ESI); - // If we've fully resolved the exception specification, notify listeners. 
if (!isUnresolvedExceptionSpec(ESI.Type)) if (auto *Listener = getASTMutationListener()) Listener->ResolvedExceptionSpec(FD); + + for (auto *Redecl : FD->redecls()) + Context.adjustExceptionSpec(cast<FunctionDecl>(Redecl), ESI); } /// Determine whether a function has an implicitly-generated exception @@ -437,7 +437,7 @@ bool Sema::CheckEquivalentExceptionSpec(const PartialDiagnostic &DiagID, OldNR != FunctionProtoType::NR_NoNoexcept && NewNR != FunctionProtoType::NR_NoNoexcept) { Diag(NewLoc, DiagID); - if (NoteID.getDiagID() != 0) + if (NoteID.getDiagID() != 0 && OldLoc.isValid()) Diag(OldLoc, NoteID); return true; } @@ -518,7 +518,7 @@ bool Sema::CheckEquivalentExceptionSpec(const PartialDiagnostic &DiagID, } Diag(NewLoc, DiagID); - if (NoteID.getDiagID() != 0) + if (NoteID.getDiagID() != 0 && OldLoc.isValid()) Diag(OldLoc, NoteID); return true; } @@ -547,7 +547,7 @@ bool Sema::CheckEquivalentExceptionSpec(const PartialDiagnostic &DiagID, return false; } Diag(NewLoc, DiagID); - if (NoteID.getDiagID() != 0) + if (NoteID.getDiagID() != 0 && OldLoc.isValid()) Diag(OldLoc, NoteID); return true; } diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaExpr.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaExpr.cpp index 8be1157..7ab269c 100644 --- a/contrib/llvm/tools/clang/lib/Sema/SemaExpr.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/SemaExpr.cpp @@ -76,25 +76,57 @@ bool Sema::CanUseDecl(NamedDecl *D) { static void DiagnoseUnusedOfDecl(Sema &S, NamedDecl *D, SourceLocation Loc) { // Warn if this is used but marked unused. if (D->hasAttr<UnusedAttr>()) { - const Decl *DC = cast<Decl>(S.getCurObjCLexicalContext()); - if (!DC->hasAttr<UnusedAttr>()) + const Decl *DC = cast_or_null<Decl>(S.getCurObjCLexicalContext()); + if (DC && !DC->hasAttr<UnusedAttr>()) S.Diag(Loc, diag::warn_used_but_marked_unused) << D->getDeclName(); } } -static AvailabilityResult DiagnoseAvailabilityOfDecl(Sema &S, - NamedDecl *D, SourceLocation Loc, - const ObjCInterfaceDecl *UnknownObjCClass, - bool ObjCPropertyAccess) { +static bool HasRedeclarationWithoutAvailabilityInCategory(const Decl *D) { + const auto *OMD = dyn_cast<ObjCMethodDecl>(D); + if (!OMD) + return false; + const ObjCInterfaceDecl *OID = OMD->getClassInterface(); + if (!OID) + return false; + + for (const ObjCCategoryDecl *Cat : OID->visible_categories()) + if (ObjCMethodDecl *CatMeth = + Cat->getMethod(OMD->getSelector(), OMD->isInstanceMethod())) + if (!CatMeth->hasAttr<AvailabilityAttr>()) + return true; + return false; +} + +static AvailabilityResult +DiagnoseAvailabilityOfDecl(Sema &S, NamedDecl *D, SourceLocation Loc, + const ObjCInterfaceDecl *UnknownObjCClass, + bool ObjCPropertyAccess) { // See if this declaration is unavailable or deprecated. std::string Message; + AvailabilityResult Result = D->getAvailability(&Message); + + // For typedefs, if the typedef declaration appears available look + // to the underlying type to see if it is more restrictive. + while (const TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(D)) { + if (Result == AR_Available) { + if (const TagType *TT = TD->getUnderlyingType()->getAs<TagType>()) { + D = TT->getDecl(); + Result = D->getAvailability(&Message); + continue; + } + } + break; + } // Forward class declarations get their attributes from their definition. 
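The added OldLoc.isValid() guards keep CheckEquivalentExceptionSpec from pointing its note at an invalid location, e.g. for an implicitly-declared previous declaration. The diagnostic itself is the usual mismatch:

    void f() noexcept;
    void f(); // error: exception specification in declaration does not
              // match previous declaration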
if (ObjCInterfaceDecl *IDecl = dyn_cast<ObjCInterfaceDecl>(D)) { - if (IDecl->getDefinition()) + if (IDecl->getDefinition()) { D = IDecl->getDefinition(); + Result = D->getAvailability(&Message); + } } - AvailabilityResult Result = D->getAvailability(&Message); + if (const EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(D)) if (Result == AR_Available) { const DeclContext *DC = ECD->getDeclContext(); @@ -103,7 +135,8 @@ static AvailabilityResult DiagnoseAvailabilityOfDecl(Sema &S, } const ObjCPropertyDecl *ObjCPDecl = nullptr; - if (Result == AR_Deprecated || Result == AR_Unavailable) { + if (Result == AR_Deprecated || Result == AR_Unavailable || + Result == AR_NotYetIntroduced) { if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) { if (const ObjCPropertyDecl *PD = MD->findPropertyDecl()) { AvailabilityResult PDeclResult = PD->getAvailability(nullptr); @@ -115,7 +148,6 @@ static AvailabilityResult DiagnoseAvailabilityOfDecl(Sema &S, switch (Result) { case AR_Available: - case AR_NotYetIntroduced: break; case AR_Deprecated: @@ -125,6 +157,34 @@ static AvailabilityResult DiagnoseAvailabilityOfDecl(Sema &S, ObjCPropertyAccess); break; + case AR_NotYetIntroduced: { + // Don't do this for enums, they can't be redeclared. + if (isa<EnumConstantDecl>(D) || isa<EnumDecl>(D)) + break; + + bool Warn = !D->getAttr<AvailabilityAttr>()->isInherited(); + // Objective-C method declarations in categories are not modelled as + // redeclarations, so manually look for a redeclaration in a category + // if necessary. + if (Warn && HasRedeclarationWithoutAvailabilityInCategory(D)) + Warn = false; + // In general, D will point to the most recent redeclaration. However, + // for `@class A;` decls, this isn't true -- manually go through the
+ if (Warn && isa<ObjCInterfaceDecl>(D)) + for (Decl *Redecl = D->getMostRecentDecl(); Redecl && Warn; + Redecl = Redecl->getPreviousDecl()) + if (!Redecl->hasAttr<AvailabilityAttr>() || + Redecl->getAttr<AvailabilityAttr>()->isInherited()) + Warn = false; + + if (Warn) + S.EmitAvailabilityWarning(Sema::AD_Partial, D, Message, Loc, + UnknownObjCClass, ObjCPDecl, + ObjCPropertyAccess); + break; + } + case AR_Unavailable: if (S.getCurContextAvailability() != AR_Unavailable) S.EmitAvailabilityWarning(Sema::AD_Unavailable, @@ -307,7 +367,8 @@ bool Sema::DiagnoseUseOfDecl(NamedDecl *D, SourceLocation Loc, DeduceReturnType(FD, Loc)) return true; } - DiagnoseAvailabilityOfDecl(*this, D, Loc, UnknownObjCClass, ObjCPropertyAccess); + DiagnoseAvailabilityOfDecl(*this, D, Loc, UnknownObjCClass, + ObjCPropertyAccess); DiagnoseUnusedOfDecl(*this, D, Loc); @@ -405,12 +466,11 @@ void Sema::DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc, SourceLocation MissingNilLoc = PP.getLocForEndOfToken(sentinelExpr->getLocEnd()); std::string NullValue; - if (calleeType == CT_Method && - PP.getIdentifierInfo("nil")->hasMacroDefinition() + if (calleeType == CT_Method && PP.isMacroDefined("nil")) NullValue = "nil"; else if (getLangOpts().CPlusPlus11) NullValue = "nullptr"; - else if (PP.getIdentifierInfo("NULL")->hasMacroDefinition()) + else if (PP.isMacroDefined("NULL")) NullValue = "NULL"; else NullValue = "(void*) 0"; @@ -3266,7 +3326,9 @@ ExprResult Sema::ActOnNumericConstant(const Token &Tok, Scope *UDLScope) { if (Ty == Context.DoubleTy) { if (getLangOpts().SinglePrecisionConstants) { Res = ImpCastExprToType(Res, Context.FloatTy, CK_FloatingCast).get(); - } else if (getLangOpts().OpenCL && !getOpenCLOptions().cl_khr_fp64) { + } else if (getLangOpts().OpenCL && + !((getLangOpts().OpenCLVersion >= 120) || + getOpenCLOptions().cl_khr_fp64)) { Diag(Tok.getLocation(), diag::warn_double_const_requires_fp64); Res = ImpCastExprToType(Res, Context.FloatTy, CK_FloatingCast).get(); } @@ -3322,6 +3384,9 @@ ExprResult Sema::ActOnNumericConstant(const Token &Tok, Scope *UDLScope) { Diag(Tok.getLocation(), diag::err_int128_unsupported); Width = MaxWidth; Ty = Context.getIntMaxType(); + } else if (Literal.MicrosoftInteger == 8 && !Literal.isUnsigned) { + Width = 8; + Ty = Context.CharTy; } else { Width = Literal.MicrosoftInteger; Ty = Context.getIntTypeForBitwidth(Width, @@ -4558,6 +4623,83 @@ static bool checkArgsForPlaceholders(Sema &S, MultiExprArg args) { return hasInvalid; } +/// If a builtin function has a pointer argument with no explicit address +/// space, then it should be able to accept a pointer to any address +/// space as input. In order to do this, we need to replace the +/// standard builtin declaration with one that uses the same address space +/// as the call. +/// +/// \returns nullptr if this builtin is not a candidate for a rewrite i.e. +/// it does not contain any pointer arguments without +/// an address space qualifier. Otherwise the rewritten +/// FunctionDecl is returned. +/// TODO: Handle pointer return types.
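The Warn computation above is what makes an explicit redeclaration without the attribute silence the partial-availability warning (the redeclaration's copy of the attribute is marked inherited). Names and versions illustrative:

    void api(void) __attribute__((availability(macosx, introduced=10.10)));
    void api(void); // explicit redeclaration, no availability attribute

    void use(void) {
      api(); // no partial-availability warning after the redeclaration
    }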
+static FunctionDecl *rewriteBuiltinFunctionDecl(Sema *Sema, ASTContext &Context, + const FunctionDecl *FDecl, + MultiExprArg ArgExprs) { + + QualType DeclType = FDecl->getType(); + const FunctionProtoType *FT = dyn_cast<FunctionProtoType>(DeclType); + + if (!Context.BuiltinInfo.hasPtrArgsOrResult(FDecl->getBuiltinID()) || + !FT || FT->isVariadic() || ArgExprs.size() != FT->getNumParams()) + return nullptr; + + bool NeedsNewDecl = false; + unsigned i = 0; + SmallVector<QualType, 8> OverloadParams; + + for (QualType ParamType : FT->param_types()) { + + // Convert array arguments to pointer to simplify type lookup. + Expr *Arg = Sema->DefaultFunctionArrayLvalueConversion(ArgExprs[i++]).get(); + QualType ArgType = Arg->getType(); + if (!ParamType->isPointerType() || + ParamType.getQualifiers().hasAddressSpace() || + !ArgType->isPointerType() || + !ArgType->getPointeeType().getQualifiers().hasAddressSpace()) { + OverloadParams.push_back(ParamType); + continue; + } + + NeedsNewDecl = true; + unsigned AS = ArgType->getPointeeType().getQualifiers().getAddressSpace(); + + QualType PointeeType = ParamType->getPointeeType(); + PointeeType = Context.getAddrSpaceQualType(PointeeType, AS); + OverloadParams.push_back(Context.getPointerType(PointeeType)); + } + + if (!NeedsNewDecl) + return nullptr; + + FunctionProtoType::ExtProtoInfo EPI; + QualType OverloadTy = Context.getFunctionType(FT->getReturnType(), + OverloadParams, EPI); + DeclContext *Parent = Context.getTranslationUnitDecl(); + FunctionDecl *OverloadDecl = FunctionDecl::Create(Context, Parent, + FDecl->getLocation(), + FDecl->getLocation(), + FDecl->getIdentifier(), + OverloadTy, + /*TInfo=*/nullptr, + SC_Extern, false, + /*hasPrototype=*/true); + SmallVector<ParmVarDecl*, 16> Params; + FT = cast<FunctionProtoType>(OverloadTy); + for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) { + QualType ParamType = FT->getParamType(i); + ParmVarDecl *Parm = + ParmVarDecl::Create(Context, OverloadDecl, SourceLocation(), + SourceLocation(), nullptr, ParamType, + /*TInfo=*/nullptr, SC_None, nullptr); + Parm->setScopeInfo(0, i); + Params.push_back(Parm); + } + OverloadDecl->setParams(Params); + return OverloadDecl; +} + /// ActOnCallExpr - Handle a call to Fn with the specified array of arguments. /// This provides the location of the left/right parens and a list of comma /// locations. @@ -4661,10 +4803,24 @@ Sema::ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, if (UnaryOperator *UnOp = dyn_cast<UnaryOperator>(NakedFn)) if (UnOp->getOpcode() == UO_AddrOf) NakedFn = UnOp->getSubExpr()->IgnoreParens(); - - if (isa<DeclRefExpr>(NakedFn)) + + if (isa<DeclRefExpr>(NakedFn)) { NDecl = cast<DeclRefExpr>(NakedFn)->getDecl(); + + FunctionDecl *FDecl = dyn_cast<FunctionDecl>(NDecl); + if (FDecl && FDecl->getBuiltinID()) { + // Rewrite the function decl for this builtin by replacing parameters + // with no explicit address space with the address space of the arguments + // in ArgExprs.
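rewriteBuiltinFunctionDecl clones a builtin's declaration with pointer parameters requalified to the caller's address spaces. A sketch of the kind of call this unblocks; the address-space number and the choice of __builtin_memcpy are illustrative (eligibility is gated on BuiltinInfo::hasPtrArgsOrResult):

    typedef __attribute__((address_space(1))) char as1_char;

    void fill(as1_char *dst, const char *src, unsigned long n) {
      // The builtin is declared with unqualified 'void *' parameters; the
      // rewritten declaration carries dst's address space, so this checks.
      __builtin_memcpy(dst, src, n);
    }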
+ if ((FDecl = rewriteBuiltinFunctionDecl(this, Context, FDecl, ArgExprs))) { + NDecl = FDecl; + Fn = DeclRefExpr::Create(Context, FDecl->getQualifierLoc(), + SourceLocation(), FDecl, false, + SourceLocation(), FDecl->getType(), + Fn->getValueKind(), FDecl); + } + } + } else if (isa<MemberExpr>(NakedFn)) NDecl = cast<MemberExpr>(NakedFn)->getMemberDecl(); if (FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(NDecl)) { @@ -5516,47 +5672,24 @@ bool Sema::DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr, } /// \brief Return false if the condition expression is valid, true otherwise. -static bool checkCondition(Sema &S, Expr *Cond) { +static bool checkCondition(Sema &S, Expr *Cond, SourceLocation QuestionLoc) { QualType CondTy = Cond->getType(); + // OpenCL v1.1 s6.3.i says the condition cannot be a floating point type. + if (S.getLangOpts().OpenCL && CondTy->isFloatingType()) { + S.Diag(QuestionLoc, diag::err_typecheck_cond_expect_nonfloat) + << CondTy << Cond->getSourceRange(); + return true; + } + // C99 6.5.15p2 if (CondTy->isScalarType()) return false; - // OpenCL v1.1 s6.3.i says the condition is allowed to be a vector or scalar. - if (S.getLangOpts().OpenCL && CondTy->isVectorType()) - return false; - - // Emit the proper error message. - S.Diag(Cond->getLocStart(), S.getLangOpts().OpenCL ? - diag::err_typecheck_cond_expect_scalar : - diag::err_typecheck_cond_expect_scalar_or_vector) - << CondTy; + S.Diag(QuestionLoc, diag::err_typecheck_cond_expect_scalar) + << CondTy << Cond->getSourceRange(); return true; } -/// \brief Return false if the two expressions can be converted to a vector, -/// true otherwise -static bool checkConditionalConvertScalarsToVectors(Sema &S, ExprResult &LHS, - ExprResult &RHS, - QualType CondTy) { - // Both operands should be of scalar type. - if (!LHS.get()->getType()->isScalarType()) { - S.Diag(LHS.get()->getLocStart(), diag::err_typecheck_cond_expect_scalar) - << CondTy; - return true; - } - if (!RHS.get()->getType()->isScalarType()) { - S.Diag(RHS.get()->getLocStart(), diag::err_typecheck_cond_expect_scalar) - << CondTy; - return true; - } - - // Implicity convert these scalars to the type of the condition. - LHS = S.ImpCastExprToType(LHS.get(), CondTy, CK_IntegralCast); - RHS = S.ImpCastExprToType(RHS.get(), CondTy, CK_IntegralCast); - return false; -} - /// \brief Handle when one or both operands are void type. static QualType checkConditionalVoidType(Sema &S, ExprResult &LHS, ExprResult &RHS) { @@ -5773,6 +5906,184 @@ static bool checkPointerIntegerMismatch(Sema &S, ExprResult &Int, return true; } +/// \brief Simple conversion between integer and floating point types. +/// +/// Used when handling the OpenCL conditional operator where the +/// condition is a vector while the other operands are scalar. +/// +/// OpenCL v1.1 s6.3.i and s6.11.6 together require that the scalar +/// types are either integer or floating type. Between the two +/// operands, the type with the higher rank is defined as the "result +/// type". The other operand needs to be promoted to the same type. No +/// other type promotion is allowed. We cannot use +/// UsualArithmeticConversions() for this purpose, since it always +/// promotes promotable types. 
+static QualType OpenCLArithmeticConversions(Sema &S, ExprResult &LHS, + ExprResult &RHS, + SourceLocation QuestionLoc) { + LHS = S.DefaultFunctionArrayLvalueConversion(LHS.get()); + if (LHS.isInvalid()) + return QualType(); + RHS = S.DefaultFunctionArrayLvalueConversion(RHS.get()); + if (RHS.isInvalid()) + return QualType(); + + // For conversion purposes, we ignore any qualifiers. + // For example, "const float" and "float" are equivalent. + QualType LHSType = + S.Context.getCanonicalType(LHS.get()->getType()).getUnqualifiedType(); + QualType RHSType = + S.Context.getCanonicalType(RHS.get()->getType()).getUnqualifiedType(); + + if (!LHSType->isIntegerType() && !LHSType->isRealFloatingType()) { + S.Diag(QuestionLoc, diag::err_typecheck_cond_expect_int_float) + << LHSType << LHS.get()->getSourceRange(); + return QualType(); + } + + if (!RHSType->isIntegerType() && !RHSType->isRealFloatingType()) { + S.Diag(QuestionLoc, diag::err_typecheck_cond_expect_int_float) + << RHSType << RHS.get()->getSourceRange(); + return QualType(); + } + + // If both types are identical, no conversion is needed. + if (LHSType == RHSType) + return LHSType; + + // Now handle "real" floating types (i.e. float, double, long double). + if (LHSType->isRealFloatingType() || RHSType->isRealFloatingType()) + return handleFloatConversion(S, LHS, RHS, LHSType, RHSType, + /*IsCompAssign = */ false); + + // Finally, we have two differing integer types. + return handleIntegerConversion<doIntegralCast, doIntegralCast> + (S, LHS, RHS, LHSType, RHSType, /*IsCompAssign = */ false); +} + +/// \brief Convert scalar operands to a vector that matches the +/// condition in length. +/// +/// Used when handling the OpenCL conditional operator where the +/// condition is a vector while the other operands are scalar. +/// +/// We first compute the "result type" for the scalar operands +/// according to OpenCL v1.1 s6.3.i. Both operands are then converted +/// into a vector of that type where the length matches the condition +/// vector type. s6.11.6 requires that the element types of the result +/// and the condition must have the same number of bits. +static QualType +OpenCLConvertScalarsToVectors(Sema &S, ExprResult &LHS, ExprResult &RHS, + QualType CondTy, SourceLocation QuestionLoc) { + QualType ResTy = OpenCLArithmeticConversions(S, LHS, RHS, QuestionLoc); + if (ResTy.isNull()) return QualType(); + + const VectorType *CV = CondTy->getAs<VectorType>(); + assert(CV); + + // Determine the vector result type + unsigned NumElements = CV->getNumElements(); + QualType VectorTy = S.Context.getExtVectorType(ResTy, NumElements); + + // Ensure that all types have the same number of bits + if (S.Context.getTypeSize(CV->getElementType()) + != S.Context.getTypeSize(ResTy)) { + // Since VectorTy is created internally, it does not pretty print + // with an OpenCL name. Instead, we just print a description. 
+ std::string EleTyName = ResTy.getUnqualifiedType().getAsString(); + SmallString<64> Str; + llvm::raw_svector_ostream OS(Str); + OS << "(vector of " << NumElements << " '" << EleTyName << "' values)"; + S.Diag(QuestionLoc, diag::err_conditional_vector_element_size) + << CondTy << OS.str(); + return QualType(); + } + + // Convert operands to the vector result type + LHS = S.ImpCastExprToType(LHS.get(), VectorTy, CK_VectorSplat); + RHS = S.ImpCastExprToType(RHS.get(), VectorTy, CK_VectorSplat); + + return VectorTy; +} + +/// \brief Return false if this is a valid OpenCL condition vector +static bool checkOpenCLConditionVector(Sema &S, Expr *Cond, + SourceLocation QuestionLoc) { + // OpenCL v1.1 s6.11.6 says the elements of the vector must be of + // integral type. + const VectorType *CondTy = Cond->getType()->getAs<VectorType>(); + assert(CondTy); + QualType EleTy = CondTy->getElementType(); + if (EleTy->isIntegerType()) return false; + + S.Diag(QuestionLoc, diag::err_typecheck_cond_expect_nonfloat) + << Cond->getType() << Cond->getSourceRange(); + return true; +} + +/// \brief Return false if the vector condition type and the vector +/// result type are compatible. +/// +/// OpenCL v1.1 s6.11.6 requires that both vector types have the same +/// number of elements, and their element types have the same number +/// of bits. +static bool checkVectorResult(Sema &S, QualType CondTy, QualType VecResTy, + SourceLocation QuestionLoc) { + const VectorType *CV = CondTy->getAs<VectorType>(); + const VectorType *RV = VecResTy->getAs<VectorType>(); + assert(CV && RV); + + if (CV->getNumElements() != RV->getNumElements()) { + S.Diag(QuestionLoc, diag::err_conditional_vector_size) + << CondTy << VecResTy; + return true; + } + + QualType CVE = CV->getElementType(); + QualType RVE = RV->getElementType(); + + if (S.Context.getTypeSize(CVE) != S.Context.getTypeSize(RVE)) { + S.Diag(QuestionLoc, diag::err_conditional_vector_element_size) + << CondTy << VecResTy; + return true; + } + + return false; +} + +/// \brief Return the resulting type for the conditional operator in +/// OpenCL (aka "ternary selection operator", OpenCL v1.1 +/// s6.3.i) when the condition is a vector type. +static QualType +OpenCLCheckVectorConditional(Sema &S, ExprResult &Cond, + ExprResult &LHS, ExprResult &RHS, + SourceLocation QuestionLoc) { + Cond = S.DefaultFunctionArrayLvalueConversion(Cond.get()); + if (Cond.isInvalid()) + return QualType(); + QualType CondTy = Cond.get()->getType(); + + if (checkOpenCLConditionVector(S, Cond.get(), QuestionLoc)) + return QualType(); + + // If either operand is a vector then find the vector type of the + // result as specified in OpenCL v1.1 s6.3.i. + if (LHS.get()->getType()->isVectorType() || + RHS.get()->getType()->isVectorType()) { + QualType VecResTy = S.CheckVectorOperands(LHS, RHS, QuestionLoc, + /*isCompAssign*/false); + if (VecResTy.isNull()) return QualType(); + // The result type must match the condition type as specified in + // OpenCL v1.1 s6.11.6. + if (checkVectorResult(S, CondTy, VecResTy, QuestionLoc)) + return QualType(); + return VecResTy; + } + + // Both operands are scalar. + return OpenCLConvertScalarsToVectors(S, LHS, RHS, CondTy, QuestionLoc); +} + /// Note that LHS is not null here, even if this is the gnu "x ?: y" extension. /// In that case, LHS = cond. 
/// C99 6.5.15 @@ -5796,11 +6107,16 @@ QualType Sema::CheckConditionalOperands(ExprResult &Cond, ExprResult &LHS, VK = VK_RValue; OK = OK_Ordinary; + // The OpenCL operator with a vector condition is sufficiently + // different to merit its own checker. + if (getLangOpts().OpenCL && Cond.get()->getType()->isVectorType()) + return OpenCLCheckVectorConditional(*this, Cond, LHS, RHS, QuestionLoc); + // First, check the condition. Cond = UsualUnaryConversions(Cond.get()); if (Cond.isInvalid()) return QualType(); - if (checkCondition(*this, Cond.get())) + if (checkCondition(*this, Cond.get(), QuestionLoc)) return QualType(); // Now check the two expressions. @@ -5812,17 +6128,9 @@ QualType Sema::CheckConditionalOperands(ExprResult &Cond, ExprResult &LHS, if (LHS.isInvalid() || RHS.isInvalid()) return QualType(); - QualType CondTy = Cond.get()->getType(); QualType LHSTy = LHS.get()->getType(); QualType RHSTy = RHS.get()->getType(); - // If the condition is a vector, and both operands are scalar, - // attempt to implicity convert them to the vector type to act like the - // built in select. (OpenCL v1.1 s6.3.i) - if (getLangOpts().OpenCL && CondTy->isVectorType()) - if (checkConditionalConvertScalarsToVectors(*this, LHS, RHS, CondTy)) - return QualType(); - // If both operands have arithmetic type, do the usual arithmetic conversions // to find a common type: C99 6.5.15p3,5. if (LHSTy->isArithmeticType() && RHSTy->isArithmeticType()) { @@ -6116,6 +6424,8 @@ static bool ExprLooksBoolean(Expr *E) { return IsLogicOp(OP->getOpcode()); if (UnaryOperator *OP = dyn_cast<UnaryOperator>(E)) return OP->getOpcode() == UO_LNot; + if (E->getType()->isPointerType()) + return true; return false; } @@ -6218,6 +6528,8 @@ ExprResult Sema::ActOnConditionalOp(SourceLocation QuestionLoc, DiagnoseConditionalPrecedence(*this, QuestionLoc, Cond.get(), LHS.get(), RHS.get()); + CheckBoolLikeConversion(Cond.get(), QuestionLoc); + if (!commonExpr) return new (Context) ConditionalOperator(Cond.get(), QuestionLoc, LHS.get(), ColonLoc, @@ -7178,9 +7490,12 @@ static void diagnoseArithmeticOnFunctionPointer(Sema &S, SourceLocation Loc, /// \returns True if pointer has incomplete type static bool checkArithmeticIncompletePointerType(Sema &S, SourceLocation Loc, Expr *Operand) { - assert(Operand->getType()->isAnyPointerType() && - !Operand->getType()->isDependentType()); - QualType PointeeTy = Operand->getType()->getPointeeType(); + QualType ResType = Operand->getType(); + if (const AtomicType *ResAtomicType = ResType->getAs<AtomicType>()) + ResType = ResAtomicType->getValueType(); + + assert(ResType->isAnyPointerType() && !ResType->isDependentType()); + QualType PointeeTy = ResType->getPointeeType(); return S.RequireCompleteType(Loc, PointeeTy, diag::err_typecheck_arithmetic_incomplete_type, PointeeTy, Operand->getSourceRange()); @@ -7196,9 +7511,13 @@ static bool checkArithmeticIncompletePointerType(Sema &S, SourceLocation Loc, /// \returns True when the operand is valid to use (even if as an extension). 
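OpenCLCheckVectorConditional and its helpers implement OpenCL v1.1 s6.3.i/s6.11.6: an integer vector condition selects lane-wise, scalar operands are converted and splatted, and the condition and result elements must have the same width. A sketch compiled as OpenCL source, with ext_vector_type typedefs standing in for the built-in vector types:

    typedef int   vi4 __attribute__((ext_vector_type(4)));
    typedef char  vc4 __attribute__((ext_vector_type(4)));
    typedef float vf4 __attribute__((ext_vector_type(4)));

    vf4 ok(vi4 c, float a, float b) {
      return c ? a : b; // a and b splat to a 4 x float vector; int and
                        // float elements are both 32 bits wide
    }

    vf4 bad(vc4 c, float a, float b) {
      return c ? a : b; // error: 8-bit condition elements do not match the
                        // 32-bit elements of the result vector
    }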
@@ -7196,9 +7511,13 @@ static bool checkArithmeticIncompletePointerType(Sema &S, SourceLocation Loc,
 /// \returns True when the operand is valid to use (even if as an extension).
 static bool checkArithmeticOpPointerOperand(Sema &S, SourceLocation Loc,
                                             Expr *Operand) {
-  if (!Operand->getType()->isAnyPointerType()) return true;
+  QualType ResType = Operand->getType();
+  if (const AtomicType *ResAtomicType = ResType->getAs<AtomicType>())
+    ResType = ResAtomicType->getValueType();
 
-  QualType PointeeTy = Operand->getType()->getPointeeType();
+  if (!ResType->isAnyPointerType()) return true;
+
+  QualType PointeeTy = ResType->getPointeeType();
   if (PointeeTy->isVoidType()) {
     diagnoseArithmeticOnVoidPointer(S, Loc, Operand);
     return !S.getLangOpts().CPlusPlus;
@@ -7558,7 +7877,7 @@ static void DiagnoseBadShiftValues(Sema& S, ExprResult &LHS, ExprResult &RHS,
   llvm::APSInt Right;
   // Check right/shifter operand
   if (RHS.get()->isValueDependent() ||
-      !RHS.get()->isIntegerConstantExpr(Right, S.Context))
+      !RHS.get()->EvaluateAsInt(Right, S.Context))
     return;
 
   if (Right.isNegative()) {
@@ -7605,7 +7924,7 @@ static void DiagnoseBadShiftValues(Sema& S, ExprResult &LHS, ExprResult &RHS,
   // turned off separately if needed.
   if (LeftBits == ResultBits - 1) {
     S.Diag(Loc, diag::warn_shift_result_sets_sign_bit)
-        << HexResult.str() << LHSType
+        << HexResult << LHSType
         << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
     return;
   }
@@ -7616,6 +7935,69 @@ static void DiagnoseBadShiftValues(Sema& S, ExprResult &LHS, ExprResult &RHS,
     << RHS.get()->getSourceRange();
 }
 
+/// \brief Return the resulting type when an OpenCL vector is shifted
+///        by a scalar or vector shift amount.
+static QualType checkOpenCLVectorShift(Sema &S,
+                                       ExprResult &LHS, ExprResult &RHS,
+                                       SourceLocation Loc, bool IsCompAssign) {
+  // OpenCL v1.1 s6.3.j says RHS can be a vector only if LHS is a vector.
+  if (!LHS.get()->getType()->isVectorType()) {
+    S.Diag(Loc, diag::err_shift_rhs_only_vector)
+      << RHS.get()->getType() << LHS.get()->getType()
+      << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
+    return QualType();
+  }
+
+  if (!IsCompAssign) {
+    LHS = S.UsualUnaryConversions(LHS.get());
+    if (LHS.isInvalid()) return QualType();
+  }
+
+  RHS = S.UsualUnaryConversions(RHS.get());
+  if (RHS.isInvalid()) return QualType();
+
+  QualType LHSType = LHS.get()->getType();
+  const VectorType *LHSVecTy = LHSType->getAs<VectorType>();
+  QualType LHSEleType = LHSVecTy->getElementType();
+
+  // Note that RHS might not be a vector.
+  QualType RHSType = RHS.get()->getType();
+  const VectorType *RHSVecTy = RHSType->getAs<VectorType>();
+  QualType RHSEleType = RHSVecTy ? RHSVecTy->getElementType() : RHSType;
+
+  // OpenCL v1.1 s6.3.j says that the operands need to be integers.
+  if (!LHSEleType->isIntegerType()) {
+    S.Diag(Loc, diag::err_typecheck_expect_int)
+      << LHS.get()->getType() << LHS.get()->getSourceRange();
+    return QualType();
+  }
+
+  if (!RHSEleType->isIntegerType()) {
+    S.Diag(Loc, diag::err_typecheck_expect_int)
+      << RHS.get()->getType() << RHS.get()->getSourceRange();
+    return QualType();
+  }
+
+  if (RHSVecTy) {
+    // OpenCL v1.1 s6.3.j says that for vector types, the operators
+    // are applied component-wise. So if RHS is a vector, then ensure
+    // that the number of elements is the same as LHS...
+    if (RHSVecTy->getNumElements() != LHSVecTy->getNumElements()) {
+      S.Diag(Loc, diag::err_typecheck_vector_lengths_not_equal)
+        << LHS.get()->getType() << RHS.get()->getType()
+        << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
+      return QualType();
+    }
+  } else {
+    // ...else expand RHS to match the number of elements in LHS.
+    QualType VecTy =
+      S.Context.getExtVectorType(RHSEleType, LHSVecTy->getNumElements());
+    RHS = S.ImpCastExprToType(RHS.get(), VecTy, CK_VectorSplat);
+  }
+
+  return LHSType;
+}
+
 // C99 6.5.7
 QualType Sema::CheckShiftOperands(ExprResult &LHS, ExprResult &RHS,
                                   SourceLocation Loc, unsigned Opc,
@@ -7624,8 +8006,11 @@ QualType Sema::CheckShiftOperands(ExprResult &LHS, ExprResult &RHS,
 
   // Vector shifts promote their scalar inputs to vector type.
   if (LHS.get()->getType()->isVectorType() ||
-      RHS.get()->getType()->isVectorType())
+      RHS.get()->getType()->isVectorType()) {
+    if (LangOpts.OpenCL)
+      return checkOpenCLVectorShift(*this, LHS, RHS, Loc, IsCompAssign);
     return CheckVectorOperands(LHS, RHS, Loc, IsCompAssign);
+  }
 
   // Shifts don't perform usual arithmetic conversions, they just do integer
   // promotions on each operand. C99 6.5.7p3
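checkOpenCLVectorShift above encodes OpenCL v1.1 s6.3.j: the vector must be on the left, both element types must be integral, a scalar shift amount is splatted, and a vector shift amount must match the LHS element count. A rough OpenCL C sketch of the four cases (names are illustrative):

    // OpenCL C; names are illustrative.
    __kernel void shifts(__global int4 *out, int4 v, int4 w, int s) {
      out[0] = v << s;        // OK: scalar RHS is splatted to int4
      out[1] = v << w;        // OK: component-wise, element counts match
      // int2 n = (int2)(1, 2);
      // out[2] = v << n;     // error: vector lengths are not equal
      // out[3] = s << v;     // error: RHS may be a vector only if LHS is
    }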
@@ -7807,8 +8192,7 @@ static bool hasIsEqualMethod(Sema &S, const Expr *LHS, const Expr *RHS) {
   if (Type->isObjCIdType()) {
     // For 'id', just check the global pool.
     Method = S.LookupInstanceMethodInGlobalPool(IsEqualSel, SourceRange(),
-                                                /*receiverId=*/true,
-                                                /*warn=*/false);
+                                                /*receiverId=*/true);
   } else {
     // Check protocols.
     Method = S.LookupMethodInQualifiedType(IsEqualSel, Type,
@@ -8614,6 +8998,139 @@ static NonConstCaptureKind isReferenceToNonConstCapture(Sema &S, Expr *E) {
   return (isa<BlockDecl>(DC) ? NCCK_Block : NCCK_Lambda);
 }
 
+static bool IsTypeModifiable(QualType Ty, bool IsDereference) {
+  Ty = Ty.getNonReferenceType();
+  if (IsDereference && Ty->isPointerType())
+    Ty = Ty->getPointeeType();
+  return !Ty.isConstQualified();
+}
+
+/// Emit the "read-only variable not assignable" error and print notes to give
+/// more information about why the variable is not assignable, such as pointing
+/// to the declaration of a const variable, showing that a method is const, or
+/// that the function is returning a const reference.
+static void DiagnoseConstAssignment(Sema &S, const Expr *E,
+                                    SourceLocation Loc) {
+  // Update err_typecheck_assign_const and note_typecheck_assign_const
+  // when this enum is changed.
+  enum {
+    ConstFunction,
+    ConstVariable,
+    ConstMember,
+    ConstMethod,
+    ConstUnknown,  // Keep as last element
+  };
+
+  SourceRange ExprRange = E->getSourceRange();
+
+  // Only emit one error on the first const found.  All other consts will emit
+  // a note to the error.
+  bool DiagnosticEmitted = false;
+
+  // Track if the current expression is the result of a dereference, and if the
+  // next checked expression is the result of a dereference.
+  bool IsDereference = false;
+  bool NextIsDereference = false;
+
+  // Loop to process MemberExpr chains.
+  while (true) {
+    IsDereference = NextIsDereference;
+    NextIsDereference = false;
+
+    E = E->IgnoreParenImpCasts();
+    if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
+      NextIsDereference = ME->isArrow();
+      const ValueDecl *VD = ME->getMemberDecl();
+      if (const FieldDecl *Field = dyn_cast<FieldDecl>(VD)) {
+        // Mutable fields can be modified even if the class is const.
+        if (Field->isMutable()) {
+          assert(DiagnosticEmitted && "Expected diagnostic not emitted.");
+          break;
+        }
+
+        if (!IsTypeModifiable(Field->getType(), IsDereference)) {
+          if (!DiagnosticEmitted) {
+            S.Diag(Loc, diag::err_typecheck_assign_const)
+                << ExprRange << ConstMember << false /*static*/ << Field
+                << Field->getType();
+            DiagnosticEmitted = true;
+          }
+          S.Diag(VD->getLocation(), diag::note_typecheck_assign_const)
+              << ConstMember << false /*static*/ << Field << Field->getType()
+              << Field->getSourceRange();
+        }
+        E = ME->getBase();
+        continue;
+      } else if (const VarDecl *VDecl = dyn_cast<VarDecl>(VD)) {
+        if (VDecl->getType().isConstQualified()) {
+          if (!DiagnosticEmitted) {
+            S.Diag(Loc, diag::err_typecheck_assign_const)
+                << ExprRange << ConstMember << true /*static*/ << VDecl
+                << VDecl->getType();
+            DiagnosticEmitted = true;
+          }
+          S.Diag(VD->getLocation(), diag::note_typecheck_assign_const)
+              << ConstMember << true /*static*/ << VDecl << VDecl->getType()
+              << VDecl->getSourceRange();
+        }
+        // Static fields do not inherit constness from parents.
+        break;
+      }
+      break;
+    } // End MemberExpr
+    break;
+  }
+
+  if (const CallExpr *CE = dyn_cast<CallExpr>(E)) {
+    // Function calls
+    const FunctionDecl *FD = CE->getDirectCallee();
+    if (!IsTypeModifiable(FD->getReturnType(), IsDereference)) {
+      if (!DiagnosticEmitted) {
+        S.Diag(Loc, diag::err_typecheck_assign_const) << ExprRange
+                                                      << ConstFunction << FD;
+        DiagnosticEmitted = true;
+      }
+      S.Diag(FD->getReturnTypeSourceRange().getBegin(),
+             diag::note_typecheck_assign_const)
+          << ConstFunction << FD << FD->getReturnType()
+          << FD->getReturnTypeSourceRange();
+    }
+  } else if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
+    // Point to variable declaration.
+    if (const ValueDecl *VD = DRE->getDecl()) {
+      if (!IsTypeModifiable(VD->getType(), IsDereference)) {
+        if (!DiagnosticEmitted) {
+          S.Diag(Loc, diag::err_typecheck_assign_const)
+              << ExprRange << ConstVariable << VD << VD->getType();
+          DiagnosticEmitted = true;
+        }
+        S.Diag(VD->getLocation(), diag::note_typecheck_assign_const)
+            << ConstVariable << VD << VD->getType() << VD->getSourceRange();
+      }
+    }
+  } else if (isa<CXXThisExpr>(E)) {
+    if (const DeclContext *DC = S.getFunctionLevelDeclContext()) {
+      if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(DC)) {
+        if (MD->isConst()) {
+          if (!DiagnosticEmitted) {
+            S.Diag(Loc, diag::err_typecheck_assign_const) << ExprRange
+                                                          << ConstMethod << MD;
+            DiagnosticEmitted = true;
+          }
+          S.Diag(MD->getLocation(), diag::note_typecheck_assign_const)
+              << ConstMethod << MD << MD->getSourceRange();
+        }
+      }
+    }
+  }
+
+  if (DiagnosticEmitted)
+    return;
+
+  // Can't determine a more specific message, so display the generic error.
+  S.Diag(Loc, diag::err_typecheck_assign_const) << ExprRange << ConstUnknown;
+}
+
 /// CheckForModifiableLvalue - Verify that E is a modifiable lvalue.  If not,
 /// emit an error and return true.  If so, return false.
 static bool CheckForModifiableLvalue(Expr *E, SourceLocation Loc, Sema &S) {
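DiagnoseConstAssignment above replaces a single generic error with one that names the specific const declaration blocking the assignment, plus a note at that declaration. What it reports, roughly (illustrative code, not from the source):

    // C++; illustrative of the diagnostics wired up above.
    struct S { const int x = 0; int y = 0; };
    void f(S s, const S c) {
      s.x = 1;  // error: assigning to const member 'x';
                //        note points at the declaration of 'x'
      c.y = 2;  // error: 'c' itself is const; note points at 'c'
    }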
@@ -8630,8 +9147,6 @@ static bool CheckForModifiableLvalue(Expr *E, SourceLocation Loc, Sema &S) {
   bool NeedType = false;
   switch (IsLV) { // C99 6.5.16p2
   case Expr::MLV_ConstQualified:
-    DiagID = diag::err_typecheck_assign_const;
-
     // Use a specialized diagnostic when we're assigning to an object
     // from an enclosing function or block.
     if (NonConstCaptureKind NCCK = isReferenceToNonConstCapture(S, E)) {
@@ -8670,14 +9185,24 @@ static bool CheckForModifiableLvalue(Expr *E, SourceLocation Loc, Sema &S) {
         if (Loc != OrigLoc)
           Assign = SourceRange(OrigLoc, OrigLoc);
         S.Diag(Loc, DiagID) << E->getSourceRange() << Assign;
-        // We need to preserve the AST regardless, so migration tool 
+        // We need to preserve the AST regardless, so migration tool
         // can do its job.
         return false;
       }
     }
   }
 
+  // If none of the special cases above are triggered, then this is a
+  // simple const assignment.
+  if (DiagID == 0) {
+    DiagnoseConstAssignment(S, E, Loc);
+    return true;
+  }
+
+  break;
+  case Expr::MLV_ConstAddrSpace:
+    DiagnoseConstAssignment(S, E, Loc);
+    return true;
   case Expr::MLV_ArrayType:
   case Expr::MLV_ArrayTemporary:
     DiagID = diag::err_typecheck_array_not_modifiable_lvalue;
@@ -9189,6 +9714,8 @@ QualType Sema::CheckAddressOfOperand(ExprResult &OrigOp, SourceLocation OpLoc) {
                !getLangOpts().CPlusPlus) {
       AddressOfError = AO_Register_Variable;
     }
+  } else if (isa<MSPropertyDecl>(dcl)) {
+    AddressOfError = AO_Property_Expansion;
   } else if (isa<FunctionTemplateDecl>(dcl)) {
     return Context.OverloadTy;
   } else if (isa<FieldDecl>(dcl) || isa<IndirectFieldDecl>(dcl)) {
@@ -11443,12 +11970,9 @@ void
 Sema::PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext,
                                       Decl *LambdaContextDecl,
                                       bool IsDecltype) {
-  ExprEvalContexts.push_back(
-             ExpressionEvaluationContextRecord(NewContext,
-                                               ExprCleanupObjects.size(),
-                                               ExprNeedsCleanups,
-                                               LambdaContextDecl,
-                                               IsDecltype));
+  ExprEvalContexts.emplace_back(NewContext, ExprCleanupObjects.size(),
+                                ExprNeedsCleanups, LambdaContextDecl,
+                                IsDecltype);
   ExprNeedsCleanups = false;
   if (!MaybeODRUseExprs.empty())
     std::swap(MaybeODRUseExprs, ExprEvalContexts.back().SavedMaybeODRUseExprs);
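A small style note on the PushExpressionEvaluationContext hunk just above: emplace_back forwards the constructor arguments and builds the record directly inside the vector, instead of constructing a temporary ExpressionEvaluationContextRecord and copying it in. A generic self-contained sketch of the pattern (types here are illustrative):

    // C++11; illustrative of the push_back -> emplace_back change.
    #include <string>
    #include <utility>
    #include <vector>

    struct Record {
      std::string Name;
      unsigned Index;
      Record(std::string N, unsigned I) : Name(std::move(N)), Index(I) {}
    };

    void demo(std::vector<Record> &V) {
      V.push_back(Record("ctx", 0)); // builds a temporary, then moves it in
      V.emplace_back("ctx", 1);      // constructs in place, no temporary
    }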
@@ -11580,7 +12104,8 @@ void Sema::MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
   // We (incorrectly) mark overload resolution as an unevaluated context, so we
   // can just check that here. Skip the rest of this function if we've already
   // marked the function as used.
-  if (Func->isUsed(false) || !IsPotentiallyEvaluatedContext(*this)) {
+  if (Func->isUsed(/*CheckUsedAttr=*/false) ||
+      !IsPotentiallyEvaluatedContext(*this)) {
     // C++11 [temp.inst]p3:
     //   Unless a function template specialization has been explicitly
     //   instantiated or explicitly specialized, the function template
@@ -11635,7 +12160,7 @@ void Sema::MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
         return;
       DefineImplicitDestructor(Loc, Destructor);
     }
-    if (Destructor->isVirtual())
+    if (Destructor->isVirtual() && getLangOpts().AppleKext)
       MarkVTableUsed(Loc, Destructor->getParent());
   } else if (CXXMethodDecl *MethodDecl = dyn_cast<CXXMethodDecl>(Func)) {
     if (MethodDecl->isOverloadedOperator() &&
@@ -11655,7 +12180,7 @@ void Sema::MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
         DefineImplicitLambdaToBlockPointerConversion(Loc, Conversion);
       else
         DefineImplicitLambdaToFunctionPointerConversion(Loc, Conversion);
-    } else if (MethodDecl->isVirtual())
+    } else if (MethodDecl->isVirtual() && getLangOpts().AppleKext)
       MarkVTableUsed(Loc, MethodDecl->getParent());
   }
 
@@ -11727,7 +12252,7 @@ void Sema::MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
     if (mightHaveNonExternalLinkage(Func))
       UndefinedButUsed.insert(std::make_pair(Func->getCanonicalDecl(), Loc));
     else if (Func->getMostRecentDecl()->isInlined() &&
-             (LangOpts.CPlusPlus || !LangOpts.GNUInline) &&
+             !LangOpts.GNUInline &&
              !Func->getMostRecentDecl()->hasAttr<GNUInlineAttr>())
       UndefinedButUsed.insert(std::make_pair(Func->getCanonicalDecl(), Loc));
   }
@@ -12029,13 +12554,11 @@ static bool captureInCapturedRegion(CapturedRegionScopeInfo *RSI,
 }
 
 /// \brief Create a field within the lambda class for the variable
-/// being captured.  Handle Array captures.
-static ExprResult addAsFieldToClosureType(Sema &S,
-                                          LambdaScopeInfo *LSI,
-                                          VarDecl *Var, QualType FieldType,
-                                          QualType DeclRefType,
-                                          SourceLocation Loc,
-                                          bool RefersToCapturedVariable) {
+/// being captured.
+static void addAsFieldToClosureType(Sema &S, LambdaScopeInfo *LSI, VarDecl *Var,
+                                    QualType FieldType, QualType DeclRefType,
+                                    SourceLocation Loc,
+                                    bool RefersToCapturedVariable) {
   CXXRecordDecl *Lambda = LSI->Lambda;
 
   // Build the non-static data member.
@@ -12046,111 +12569,8 @@ static ExprResult addAsFieldToClosureType(Sema &S,
   Field->setImplicit(true);
   Field->setAccess(AS_private);
   Lambda->addDecl(Field);
-
-  // C++11 [expr.prim.lambda]p21:
-  //   When the lambda-expression is evaluated, the entities that
-  //   are captured by copy are used to direct-initialize each
-  //   corresponding non-static data member of the resulting closure
-  //   object. (For array members, the array elements are
-  //   direct-initialized in increasing subscript order.) These
-  //   initializations are performed in the (unspecified) order in
-  //   which the non-static data members are declared.
-
-  // Introduce a new evaluation context for the initialization, so
-  // that temporaries introduced as part of the capture are retained
-  // to be re-"exported" from the lambda expression itself.
-  EnterExpressionEvaluationContext scope(S, Sema::PotentiallyEvaluated);
-
-  // C++ [expr.prim.labda]p12:
-  //   An entity captured by a lambda-expression is odr-used (3.2) in
-  //   the scope containing the lambda-expression.
-  Expr *Ref = new (S.Context) DeclRefExpr(Var, RefersToCapturedVariable,
-                                          DeclRefType, VK_LValue, Loc);
-  Var->setReferenced(true);
-  Var->markUsed(S.Context);
-
-  // When the field has array type, create index variables for each
-  // dimension of the array.  We use these index variables to subscript
-  // the source array, and other clients (e.g., CodeGen) will perform
-  // the necessary iteration with these index variables.
-  SmallVector<VarDecl *, 4> IndexVariables;
-  QualType BaseType = FieldType;
-  QualType SizeType = S.Context.getSizeType();
-  LSI->ArrayIndexStarts.push_back(LSI->ArrayIndexVars.size());
-  while (const ConstantArrayType *Array
-                        = S.Context.getAsConstantArrayType(BaseType)) {
-    // Create the iteration variable for this array index.
-    IdentifierInfo *IterationVarName = nullptr;
-    {
-      SmallString<8> Str;
-      llvm::raw_svector_ostream OS(Str);
-      OS << "__i" << IndexVariables.size();
-      IterationVarName = &S.Context.Idents.get(OS.str());
-    }
-    VarDecl *IterationVar
-      = VarDecl::Create(S.Context, S.CurContext, Loc, Loc,
-                        IterationVarName, SizeType,
-                        S.Context.getTrivialTypeSourceInfo(SizeType, Loc),
-                        SC_None);
-    IndexVariables.push_back(IterationVar);
-    LSI->ArrayIndexVars.push_back(IterationVar);
-
-    // Create a reference to the iteration variable.
-    ExprResult IterationVarRef
-      = S.BuildDeclRefExpr(IterationVar, SizeType, VK_LValue, Loc);
-    assert(!IterationVarRef.isInvalid() &&
-           "Reference to invented variable cannot fail!");
-    IterationVarRef = S.DefaultLvalueConversion(IterationVarRef.get());
-    assert(!IterationVarRef.isInvalid() &&
-           "Conversion of invented variable cannot fail!");
-
-    // Subscript the array with this iteration variable.
-    ExprResult Subscript = S.CreateBuiltinArraySubscriptExpr(
-        Ref, Loc, IterationVarRef.get(), Loc);
-    if (Subscript.isInvalid()) {
-      S.CleanupVarDeclMarking();
-      S.DiscardCleanupsInEvaluationContext();
-      return ExprError();
-    }
-
-    Ref = Subscript.get();
-    BaseType = Array->getElementType();
-  }
-
-  // Construct the entity that we will be initializing. For an array, this
-  // will be first element in the array, which may require several levels
-  // of array-subscript entities.
-  SmallVector<InitializedEntity, 4> Entities;
-  Entities.reserve(1 + IndexVariables.size());
-  Entities.push_back(
-    InitializedEntity::InitializeLambdaCapture(Var->getIdentifier(),
-                                               Field->getType(), Loc));
-  for (unsigned I = 0, N = IndexVariables.size(); I != N; ++I)
-    Entities.push_back(InitializedEntity::InitializeElement(S.Context,
-                                                            0,
-                                                            Entities.back()));
-
-  InitializationKind InitKind
-    = InitializationKind::CreateDirect(Loc, Loc, Loc);
-  InitializationSequence Init(S, Entities.back(), InitKind, Ref);
-  ExprResult Result(true);
-  if (!Init.Diagnose(S, Entities.back(), InitKind, Ref))
-    Result = Init.Perform(S, Entities.back(), InitKind, Ref);
-
-  // If this initialization requires any cleanups (e.g., due to a
-  // default argument to a copy constructor), note that for the
-  // lambda.
-  if (S.ExprNeedsCleanups)
-    LSI->ExprNeedsCleanups = true;
-
-  // Exit the expression evaluation context used for the capture.
-  S.CleanupVarDeclMarking();
-  S.DiscardCleanupsInEvaluationContext();
-  return Result;
 }
-
-
 
 /// \brief Capture the given variable in the lambda.
 static bool captureInLambda(LambdaScopeInfo *LSI,
                             VarDecl *Var,
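The large removal above takes out the per-element capture machinery (the __iN iteration variables, ArrayIndexVars bookkeeping, and the eager InitializationSequence) that this function used to build for by-copy captures of array type; per this change, addAsFieldToClosureType now only creates the closure field. The construct that machinery served, for reference (illustrative):

    // C++11; by-copy capture of an array still direct-initializes the
    // closure member element-by-element, but the index bookkeeping is no
    // longer built in addAsFieldToClosureType.
    int sum() {
      int arr[3] = {1, 2, 3};
      auto f = [arr] { return arr[0] + arr[2]; };
      return f();
    }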
@@ -12228,14 +12648,9 @@ static bool captureInLambda(LambdaScopeInfo *LSI,
   }
 
   // Capture this variable in the lambda.
-  Expr *CopyExpr = nullptr;
-  if (BuildAndDiagnose) {
-    ExprResult Result = addAsFieldToClosureType(S, LSI, Var,
-                                        CaptureType, DeclRefType, Loc,
-                                        RefersToCapturedVariable);
-    if (!Result.isInvalid())
-      CopyExpr = Result.get();
-  }
+  if (BuildAndDiagnose)
+    addAsFieldToClosureType(S, LSI, Var, CaptureType, DeclRefType, Loc,
+                            RefersToCapturedVariable);
 
   // Compute the type of a reference to this captured variable.
   if (ByRef)
@@ -12254,18 +12669,20 @@ static bool captureInLambda(LambdaScopeInfo *LSI,
   // Add the capture.
   if (BuildAndDiagnose)
     LSI->addCapture(Var, /*IsBlock=*/false, ByRef, RefersToCapturedVariable,
-                    Loc, EllipsisLoc, CaptureType, CopyExpr);
+                    Loc, EllipsisLoc, CaptureType, /*CopyExpr=*/nullptr);
 
   return true;
 }
 
-bool Sema::tryCaptureVariable(VarDecl *Var, SourceLocation ExprLoc,
-                              TryCaptureKind Kind, SourceLocation EllipsisLoc,
-                              bool BuildAndDiagnose,
-                              QualType &CaptureType,
-                              QualType &DeclRefType,
-                              const unsigned *const FunctionScopeIndexToStopAt) {
-  bool Nested = Var->isInitCapture();
+bool Sema::tryCaptureVariable(
+    VarDecl *Var, SourceLocation ExprLoc, TryCaptureKind Kind,
+    SourceLocation EllipsisLoc, bool BuildAndDiagnose, QualType &CaptureType,
+    QualType &DeclRefType, const unsigned *const FunctionScopeIndexToStopAt) {
+  // An init-capture is notionally from the context surrounding its
+  // declaration, but its parent DC is the lambda class.
+  DeclContext *VarDC = Var->getDeclContext();
+  if (Var->isInitCapture())
+    VarDC = VarDC->getParent();
 
   DeclContext *DC = CurContext;
   const unsigned MaxFunctionScopesIndex = FunctionScopeIndexToStopAt
@@ -12281,9 +12698,9 @@ bool Sema::tryCaptureVariable(VarDecl *Var, SourceLocation ExprLoc,
   }
 
 
-  // If the variable is declared in the current context (and is not an
-  // init-capture), there is no need to capture it.
-  if (!Nested && Var->getDeclContext() == DC) return true;
+  // If the variable is declared in the current context, there is no need to
+  // capture it.
+  if (VarDC == DC) return true;
 
   // Capture global variables if it is required to use private copy of this
   // variable.
@@ -12301,6 +12718,7 @@ bool Sema::tryCaptureVariable(VarDecl *Var, SourceLocation ExprLoc,
   // the variable.
   CaptureType = Var->getType();
   DeclRefType = CaptureType.getNonReferenceType();
+  bool Nested = false;
   bool Explicit = (Kind != TryCapture_Implicit);
   unsigned FunctionScopesIndex = MaxFunctionScopesIndex;
   do {
@@ -12501,7 +12919,7 @@ bool Sema::tryCaptureVariable(VarDecl *Var, SourceLocation ExprLoc,
     FunctionScopesIndex--;
     DC = ParentDC;
     Explicit = false;
-  } while (!Var->getDeclContext()->Equals(DC));
+  } while (!VarDC->Equals(DC));
 
   // Walk back down the scope stack, (e.g. from outer lambda to inner lambda)
   // computing the type of the capture at each step, checking type-specific
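The tryCaptureVariable rewrite keys the capture walk on VarDC rather than a Nested flag: an init-capture's VarDecl is parented by the closure class even though it semantically belongs to the enclosing scope, so the walk uses the parent DeclContext. The case this serves, as a sketch (C++14):

    // C++14 init-capture: 'n' is declared in the lambda-introducer of
    // 'outer', so its DeclContext is outer's closure class; an inner
    // lambda naming 'n' must still capture it from outer's scope.
    int make() {
      auto outer = [n = 42] {
        auto inner = [&] { return n + 1; }; // captures 'n' from 'outer'
        return inner();
      };
      return outer();
    }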
@@ -13740,7 +14158,17 @@ ExprResult Sema::CheckPlaceholderExpr(Expr *E) {
   // Bound member functions.
   case BuiltinType::BoundMember: {
     ExprResult result = E;
-    tryToRecoverWithCall(result, PDiag(diag::err_bound_member_function),
+    const Expr *BME = E->IgnoreParens();
+    PartialDiagnostic PD = PDiag(diag::err_bound_member_function);
+    // Try to give a nicer diagnostic if it is a bound member that we recognize.
+    if (isa<CXXPseudoDestructorExpr>(BME)) {
+      PD = PDiag(diag::err_dtor_expr_without_call) << /*pseudo-destructor*/ 1;
+    } else if (const auto *ME = dyn_cast<MemberExpr>(BME)) {
+      if (ME->getMemberNameInfo().getName().getNameKind() ==
+          DeclarationName::CXXDestructorName)
+        PD = PDiag(diag::err_dtor_expr_without_call) << /*destructor*/ 0;
+    }
+    tryToRecoverWithCall(result, PD,
                          /*complain*/ true);
     return result;
   }
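Together with the removal of DiagnoseDtorReference further down in SemaExprCXX.cpp, this hunk changes how an uncalled destructor reference is reported: instead of being recovered into an implicit call, it is diagnosed from CheckPlaceholderExpr via err_dtor_expr_without_call. The construct in question (illustrative):

    // C++; a destructor reference that is never actually called.
    struct T { ~T(); };
    void f(T *p) {
      p->~T;   // error: destructor reference must be called: p->~T()
    }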
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaExprCXX.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaExprCXX.cpp
index a92b7ac..7e305ff 100644
--- a/contrib/llvm/tools/clang/lib/Sema/SemaExprCXX.cpp
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaExprCXX.cpp
@@ -113,6 +113,9 @@ ParsedType Sema::getDestructorName(SourceLocation TildeLoc,
   bool isDependent = false;
   bool LookInScope = false;
 
+  if (SS.isInvalid())
+    return ParsedType();
+
   // If we have an object type, it's because we are in a
   // pseudo-destructor-expression or a member access expression, and
   // we know what type we're looking for.
@@ -644,37 +647,98 @@ ExprResult Sema::BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
     Diag(OpLoc, diag::err_omp_simd_region_cannot_use_stmt) << "throw";
 
   if (Ex && !Ex->isTypeDependent()) {
-    ExprResult ExRes = CheckCXXThrowOperand(OpLoc, Ex, IsThrownVarInScope);
-    if (ExRes.isInvalid())
+    QualType ExceptionObjectTy = Context.getExceptionObjectType(Ex->getType());
+    if (CheckCXXThrowOperand(OpLoc, ExceptionObjectTy, Ex))
       return ExprError();
-    Ex = ExRes.get();
+
+    // Initialize the exception result.  This implicitly weeds out
+    // abstract types or types with inaccessible copy constructors.
+
+    // C++0x [class.copymove]p31:
+    //   When certain criteria are met, an implementation is allowed to omit the
+    //   copy/move construction of a class object [...]
+    //
+    //     - in a throw-expression, when the operand is the name of a
+    //       non-volatile automatic object (other than a function or
+    //       catch-clause
+    //       parameter) whose scope does not extend beyond the end of the
+    //       innermost enclosing try-block (if there is one), the copy/move
+    //       operation from the operand to the exception object (15.1) can be
+    //       omitted by constructing the automatic object directly into the
+    //       exception object
+    const VarDecl *NRVOVariable = nullptr;
+    if (IsThrownVarInScope)
+      NRVOVariable = getCopyElisionCandidate(QualType(), Ex, false);
+
+    InitializedEntity Entity = InitializedEntity::InitializeException(
+        OpLoc, ExceptionObjectTy,
+        /*NRVO=*/NRVOVariable != nullptr);
+    ExprResult Res = PerformMoveOrCopyInitialization(
+        Entity, NRVOVariable, QualType(), Ex, IsThrownVarInScope);
+    if (Res.isInvalid())
+      return ExprError();
+    Ex = Res.get();
   }
-  
+
   return new (Context) CXXThrowExpr(Ex, Context.VoidTy, OpLoc,
                                     IsThrownVarInScope);
 }
 
-/// CheckCXXThrowOperand - Validate the operand of a throw.
-ExprResult Sema::CheckCXXThrowOperand(SourceLocation ThrowLoc, Expr *E,
-                                      bool IsThrownVarInScope) {
-  // C++ [except.throw]p3:
-  //   A throw-expression initializes a temporary object, called the exception
-  //   object, the type of which is determined by removing any top-level
-  //   cv-qualifiers from the static type of the operand of throw and adjusting
-  //   the type from "array of T" or "function returning T" to "pointer to T"
-  //   or "pointer to function returning T", [...]
-  if (E->getType().hasQualifiers())
-    E = ImpCastExprToType(E, E->getType().getUnqualifiedType(), CK_NoOp,
-                          E->getValueKind()).get();
-
-  ExprResult Res = DefaultFunctionArrayConversion(E);
-  if (Res.isInvalid())
-    return ExprError();
-  E = Res.get();
+static void
+collectPublicBases(CXXRecordDecl *RD,
+                   llvm::DenseMap<CXXRecordDecl *, unsigned> &SubobjectsSeen,
+                   llvm::SmallPtrSetImpl<CXXRecordDecl *> &VBases,
+                   llvm::SetVector<CXXRecordDecl *> &PublicSubobjectsSeen,
+                   bool ParentIsPublic) {
+  for (const CXXBaseSpecifier &BS : RD->bases()) {
+    CXXRecordDecl *BaseDecl = BS.getType()->getAsCXXRecordDecl();
+    bool NewSubobject;
+    // Virtual bases constitute the same subobject.  Non-virtual bases are
+    // always distinct subobjects.
+    if (BS.isVirtual())
+      NewSubobject = VBases.insert(BaseDecl).second;
+    else
+      NewSubobject = true;
+
+    if (NewSubobject)
+      ++SubobjectsSeen[BaseDecl];
+
+    // Only add subobjects which have public access throughout the entire chain.
+    bool PublicPath = ParentIsPublic && BS.getAccessSpecifier() == AS_public;
+    if (PublicPath)
+      PublicSubobjectsSeen.insert(BaseDecl);
+
+    // Recurse on to each base subobject.
+    collectPublicBases(BaseDecl, SubobjectsSeen, VBases, PublicSubobjectsSeen,
+                       PublicPath);
+  }
+}
+
+static void getUnambiguousPublicSubobjects(
+    CXXRecordDecl *RD, llvm::SmallVectorImpl<CXXRecordDecl *> &Objects) {
+  llvm::DenseMap<CXXRecordDecl *, unsigned> SubobjectsSeen;
+  llvm::SmallSet<CXXRecordDecl *, 2> VBases;
+  llvm::SetVector<CXXRecordDecl *> PublicSubobjectsSeen;
+  SubobjectsSeen[RD] = 1;
+  PublicSubobjectsSeen.insert(RD);
+  collectPublicBases(RD, SubobjectsSeen, VBases, PublicSubobjectsSeen,
+                     /*ParentIsPublic=*/true);
+
+  for (CXXRecordDecl *PublicSubobject : PublicSubobjectsSeen) {
+    // Skip ambiguous objects.
+    if (SubobjectsSeen[PublicSubobject] > 1)
+      continue;
+
+    Objects.push_back(PublicSubobject);
+  }
+}
+
+/// CheckCXXThrowOperand - Validate the operand of a throw.
+bool Sema::CheckCXXThrowOperand(SourceLocation ThrowLoc,
+                                QualType ExceptionObjectTy, Expr *E) {
   //   If the type of the exception would be an incomplete type or a pointer
   //   to an incomplete type other than (cv) void the program is ill-formed.
-  QualType Ty = E->getType();
+  QualType Ty = ExceptionObjectTy;
   bool isPointer = false;
   if (const PointerType* Ptr = Ty->getAs<PointerType>()) {
     Ty = Ptr->getPointeeType();
@@ -682,49 +746,20 @@ ExprResult Sema::CheckCXXThrowOperand(SourceLocation ThrowLoc, Expr *E,
   }
   if (!isPointer || !Ty->isVoidType()) {
     if (RequireCompleteType(ThrowLoc, Ty,
-                            isPointer? diag::err_throw_incomplete_ptr
-                                     : diag::err_throw_incomplete,
+                            isPointer ? diag::err_throw_incomplete_ptr
+                                      : diag::err_throw_incomplete,
                             E->getSourceRange()))
-      return ExprError();
+      return true;
 
-    if (RequireNonAbstractType(ThrowLoc, E->getType(),
+    if (RequireNonAbstractType(ThrowLoc, ExceptionObjectTy,
                                diag::err_throw_abstract_type, E))
-      return ExprError();
+      return true;
   }
 
-  // Initialize the exception result.  This implicitly weeds out
-  // abstract types or types with inaccessible copy constructors.
-
-  // C++0x [class.copymove]p31:
-  //   When certain criteria are met, an implementation is allowed to omit the
-  //   copy/move construction of a class object [...]
-  //
-  //     - in a throw-expression, when the operand is the name of a
-  //       non-volatile automatic object (other than a function or catch-clause
-  //       parameter) whose scope does not extend beyond the end of the
-  //       innermost enclosing try-block (if there is one), the copy/move
-  //       operation from the operand to the exception object (15.1) can be
-  //       omitted by constructing the automatic object directly into the
-  //       exception object
-  const VarDecl *NRVOVariable = nullptr;
-  if (IsThrownVarInScope)
-    NRVOVariable = getCopyElisionCandidate(QualType(), E, false);
-
-  InitializedEntity Entity =
-      InitializedEntity::InitializeException(ThrowLoc, E->getType(),
-                                             /*NRVO=*/NRVOVariable != nullptr);
-  Res = PerformMoveOrCopyInitialization(Entity, NRVOVariable,
-                                        QualType(), E,
-                                        IsThrownVarInScope);
-  if (Res.isInvalid())
-    return ExprError();
-  E = Res.get();
-
   // If the exception has class type, we need additional handling.
-  const RecordType *RecordTy = Ty->getAs<RecordType>();
-  if (!RecordTy)
-    return E;
-  CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
+  CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
+  if (!RD)
+    return false;
 
   // If we are throwing a polymorphic class type or pointer thereof,
   // exception handling will make use of the vtable.
@@ -732,22 +767,69 @@ ExprResult Sema::CheckCXXThrowOperand(SourceLocation ThrowLoc, Expr *E,
 
   // If a pointer is thrown, the referenced object will not be destroyed.
   if (isPointer)
-    return E;
+    return false;
 
   // If the class has a destructor, we must be able to call it.
-  if (RD->hasIrrelevantDestructor())
-    return E;
+  if (!RD->hasIrrelevantDestructor()) {
+    if (CXXDestructorDecl *Destructor = LookupDestructor(RD)) {
+      MarkFunctionReferenced(E->getExprLoc(), Destructor);
+      CheckDestructorAccess(E->getExprLoc(), Destructor,
+                            PDiag(diag::err_access_dtor_exception) << Ty);
+      if (DiagnoseUseOfDecl(Destructor, E->getExprLoc()))
+        return true;
+    }
+  }
 
-  CXXDestructorDecl *Destructor = LookupDestructor(RD);
-  if (!Destructor)
-    return E;
+  // The MSVC ABI creates a list of all types which can catch the exception
+  // object.  This list also references the appropriate copy constructor to call
+  // if the object is caught by value and has a non-trivial copy constructor.
+  if (Context.getTargetInfo().getCXXABI().isMicrosoft()) {
+    // We are only interested in the public, unambiguous bases contained within
+    // the exception object.  Bases which are ambiguous or otherwise
+    // inaccessible are not catchable types.
+    llvm::SmallVector<CXXRecordDecl *, 2> UnambiguousPublicSubobjects;
+    getUnambiguousPublicSubobjects(RD, UnambiguousPublicSubobjects);
+
+    for (CXXRecordDecl *Subobject : UnambiguousPublicSubobjects) {
+      // Attempt to lookup the copy constructor.  Various pieces of machinery
+      // will spring into action, like template instantiation, which means this
+      // cannot be a simple walk of the class's decls.  Instead, we must perform
+      // lookup and overload resolution.
+      CXXConstructorDecl *CD = LookupCopyingConstructor(Subobject, 0);
+      if (!CD)
+        continue;
 
-  MarkFunctionReferenced(E->getExprLoc(), Destructor);
-  CheckDestructorAccess(E->getExprLoc(), Destructor,
-                        PDiag(diag::err_access_dtor_exception) << Ty);
-  if (DiagnoseUseOfDecl(Destructor, E->getExprLoc()))
-    return ExprError();
-  return E;
+      // Mark the constructor referenced as it is used by this throw expression.
+      MarkFunctionReferenced(E->getExprLoc(), CD);
+
+      // Skip this copy constructor if it is trivial, we don't need to record it
+      // in the catchable type data.
+      if (CD->isTrivial())
+        continue;
+
+      // The copy constructor is non-trivial, create a mapping from this class
+      // type to this constructor.
+      // N.B.  The selection of copy constructor is not sensitive to this
+      // particular throw-site.  Lookup will be performed at the catch-site to
+      // ensure that the copy constructor is, in fact, accessible (via
+      // friendship or any other means).
+      Context.addCopyConstructorForExceptionObject(Subobject, CD);
+
+      // We don't keep the instantiated default argument expressions around so
+      // we must rebuild them here.
+      for (unsigned I = 1, E = CD->getNumParams(); I != E; ++I) {
+        // Skip any default arguments that we've already instantiated.
+        if (Context.getDefaultArgExprForConstructor(CD, I))
+          continue;
+
+        Expr *DefaultArg =
+            BuildCXXDefaultArgExpr(ThrowLoc, CD, CD->getParamDecl(I)).get();
+        Context.addDefaultArgExprForConstructor(CD, I, DefaultArg);
+      }
+    }
+  }
+
+  return false;
 }
 
 QualType Sema::getCurrentThisType() {
@@ -982,18 +1064,21 @@ Sema::BuildCXXTypeConstructExpr(TypeSourceInfo *TInfo,
   Expr *Inner = Result.get();
   if (CXXBindTemporaryExpr *BTE = dyn_cast_or_null<CXXBindTemporaryExpr>(Inner))
     Inner = BTE->getSubExpr();
-  if (isa<InitListExpr>(Inner)) {
-    // If the list-initialization doesn't involve a constructor call, we'll get
-    // the initializer-list (with corrected type) back, but that's not what we
-    // want, since it will be treated as an initializer list in further
-    // processing. Explicitly insert a cast here.
+  if (!isa<CXXTemporaryObjectExpr>(Inner)) {
+    // If we created a CXXTemporaryObjectExpr, that node also represents the
+    // functional cast. Otherwise, create an explicit cast to represent
+    // the syntactic form of a functional-style cast that was used here.
+    //
+    // FIXME: Creating a CXXFunctionalCastExpr around a CXXConstructExpr
+    // would give a more consistent AST representation than using a
+    // CXXTemporaryObjectExpr. It's also weird that the functional cast
+    // is sometimes handled by initialization and sometimes not.
     QualType ResultType = Result.get()->getType();
     Result = CXXFunctionalCastExpr::Create(
         Context, ResultType, Expr::getValueKindForType(TInfo->getType()), TInfo,
        CK_NoOp, Result.get(), /*Path=*/nullptr, LParenLoc, RParenLoc);
   }
 
-  // FIXME: Improve AST representation?
   return Result;
 }
 
@@ -1333,8 +1418,9 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
           << ConvTy->isEnumeralType() << ConvTy;
     }
 
-    virtual SemaDiagnosticBuilder diagnoseConversion(
-        Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) override {
+    SemaDiagnosticBuilder diagnoseConversion(Sema &S, SourceLocation Loc,
+                                             QualType T,
+                                             QualType ConvTy) override {
      return S.Diag(Loc,
                    S.getLangOpts().CPlusPlus11
                      ? diag::warn_cxx98_compat_array_size_conversion
@@ -2048,7 +2134,7 @@ void Sema::DeclareGlobalNewDelete() {
 void Sema::DeclareGlobalAllocationFunction(DeclarationName Name,
                                            QualType Return,
                                            QualType Param1, QualType Param2,
-                                           bool AddMallocAttr) {
+                                           bool AddRestrictAttr) {
   DeclContext *GlobalCtx = Context.getTranslationUnitDecl();
   unsigned NumParams = Param2.isNull() ? 1 : 2;
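The Microsoft-ABI block above records, at the throw site, each unambiguous public base of the thrown class as a catchable type, along with its non-trivial copy constructor, because a by-value catch may need to copy the exception object as one of its bases. The shape involved, as a sketch (illustrative names):

    // C++ compiled for the Microsoft C++ ABI.
    struct Base {
      Base();
      Base(const Base &);   // non-trivial: recorded with the catchable type
    };
    struct Derived : Base {};  // Base is a public, unambiguous subobject

    void f() {
      try {
        throw Derived();    // catchable as Derived and as Base
      } catch (Base b) {    // by-value catch copies the Base subobject
      }
    }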
@@ -2071,8 +2157,9 @@ void Sema::DeclareGlobalAllocationFunction(DeclarationName Name,
       // FIXME: Do we need to check for default arguments here?
       if (InitialParam1Type == Param1 &&
           (NumParams == 1 || InitialParam2Type == Param2)) {
-        if (AddMallocAttr && !Func->hasAttr<MallocAttr>())
-          Func->addAttr(MallocAttr::CreateImplicit(Context));
+        if (AddRestrictAttr && !Func->hasAttr<RestrictAttr>())
+          Func->addAttr(RestrictAttr::CreateImplicit(
+              Context, RestrictAttr::GNU_malloc));
         // Make the function visible to name lookup, even if we found it in
         // an unimported module. It either is an implicitly-declared global
         // allocation function, or is suppressing that function.
@@ -2110,9 +2197,14 @@ void Sema::DeclareGlobalAllocationFunction(DeclarationName Name,
                                  SourceLocation(), Name,
                                  FnType, /*TInfo=*/nullptr, SC_None, false, true);
   Alloc->setImplicit();
+
+  // Implicit sized deallocation functions always have default visibility.
+  Alloc->addAttr(VisibilityAttr::CreateImplicit(Context,
+                                                VisibilityAttr::Default));
 
-  if (AddMallocAttr)
-    Alloc->addAttr(MallocAttr::CreateImplicit(Context));
+  if (AddRestrictAttr)
+    Alloc->addAttr(
+        RestrictAttr::CreateImplicit(Context, RestrictAttr::GNU_malloc));
 
   ParmVarDecl *ParamDecls[2];
   for (unsigned I = 0; I != NumParams; ++I) {
@@ -2247,6 +2339,260 @@ bool Sema::FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
   return false;
 }
 
+namespace {
+/// \brief Checks whether delete-expression, and new-expression used for
+/// initializing deletee have the same array form.
+class MismatchingNewDeleteDetector {
+public:
+  enum MismatchResult {
+    /// Indicates that there is no mismatch or a mismatch cannot be proven.
+    NoMismatch,
+    /// Indicates that variable is initialized with mismatching form of \a new.
+    VarInitMismatches,
+    /// Indicates that member is initialized with mismatching form of \a new.
+    MemberInitMismatches,
+    /// Indicates that 1 or more constructors' definitions could not be
+    /// analyzed, and they will be checked again at the end of translation
+    /// unit.
+    AnalyzeLater
+  };
+
+  /// \param EndOfTU True, if this is the final analysis at the end of
+  /// translation unit. False, if this is the initial analysis at the point
+  /// delete-expression was encountered.
+  explicit MismatchingNewDeleteDetector(bool EndOfTU)
+      : IsArrayForm(false), Field(nullptr), EndOfTU(EndOfTU),
+        HasUndefinedConstructors(false) {}
+
+  /// \brief Checks whether pointee of a delete-expression is initialized with
+  /// matching form of new-expression.
+  ///
+  /// If return value is \c VarInitMismatches or \c MemberInitMismatches at the
+  /// point where delete-expression is encountered, then a warning will be
+  /// issued immediately. If return value is \c AnalyzeLater at the point where
+  /// delete-expression is seen, then member will be analyzed at the end of
+  /// translation unit. \c AnalyzeLater is returned iff at least one constructor
+  /// couldn't be analyzed. If at least one constructor initializes the member
+  /// with matching type of new, the return value is \c NoMismatch.
+  MismatchResult analyzeDeleteExpr(const CXXDeleteExpr *DE);
+  /// \brief Analyzes a class member.
+  /// \param Field Class member to analyze.
+  /// \param DeleteWasArrayForm Array form-ness of the delete-expression used
+  /// for deleting the \p Field.
+  MismatchResult analyzeField(FieldDecl *Field, bool DeleteWasArrayForm);
+  /// List of mismatching new-expressions used for initialization of the pointee
+  llvm::SmallVector<const CXXNewExpr *, 4> NewExprs;
+  /// Indicates whether delete-expression was in array form.
+  bool IsArrayForm;
+  FieldDecl *Field;
+
+private:
+  const bool EndOfTU;
+  /// \brief Indicates that there is at least one constructor without body.
+  bool HasUndefinedConstructors;
+  /// \brief Returns \c CXXNewExpr from given initialization expression.
+  /// \param E Expression used for initializing pointee in delete-expression.
+  /// E can be a single-element \c InitListExpr consisting of new-expression.
+  const CXXNewExpr *getNewExprFromInitListOrExpr(const Expr *E);
+  /// \brief Returns whether member is initialized with mismatching form of
+  /// \c new either by the member initializer or in-class initialization.
+  ///
+  /// If bodies of all constructors are not visible at the end of translation
+  /// unit or at least one constructor initializes member with the matching
+  /// form of \c new, mismatch cannot be proven, and this function will return
+  /// \c NoMismatch.
+  MismatchResult analyzeMemberExpr(const MemberExpr *ME);
+  /// \brief Returns whether variable is initialized with mismatching form of
+  /// \c new.
+  ///
+  /// If variable is initialized with matching form of \c new or variable is
+  /// not initialized with a \c new expression, this function will return
+  /// true.  If variable is initialized with mismatching form of \c new,
+  /// returns false.
+  /// \param D Variable to analyze.
+  bool hasMatchingVarInit(const DeclRefExpr *D);
+  /// \brief Checks whether the constructor initializes pointee with mismatching
+  /// form of \c new.
+  ///
+  /// Returns true, if member is initialized with matching form of \c new in
+  /// member initializer list. Returns false, if member is initialized with the
+  /// matching form of \c new in this constructor's initializer or given
+  /// constructor isn't defined at the point where delete-expression is seen,
+  /// or member isn't initialized by the constructor.
+  bool hasMatchingNewInCtor(const CXXConstructorDecl *CD);
+  /// \brief Checks whether member is initialized with matching form of
+  /// \c new in member initializer list.
+  bool hasMatchingNewInCtorInit(const CXXCtorInitializer *CI);
+  /// Checks whether member is initialized with mismatching form of \c new by
+  /// in-class initializer.
+  MismatchResult analyzeInClassInitializer();
+};
+}
+
+MismatchingNewDeleteDetector::MismatchResult
+MismatchingNewDeleteDetector::analyzeDeleteExpr(const CXXDeleteExpr *DE) {
+  NewExprs.clear();
+  assert(DE && "Expected delete-expression");
+  IsArrayForm = DE->isArrayForm();
+  const Expr *E = DE->getArgument()->IgnoreParenImpCasts();
+  if (const MemberExpr *ME = dyn_cast<const MemberExpr>(E)) {
+    return analyzeMemberExpr(ME);
+  } else if (const DeclRefExpr *D = dyn_cast<const DeclRefExpr>(E)) {
+    if (!hasMatchingVarInit(D))
+      return VarInitMismatches;
+  }
+  return NoMismatch;
+}
+
+const CXXNewExpr *
+MismatchingNewDeleteDetector::getNewExprFromInitListOrExpr(const Expr *E) {
+  assert(E != nullptr && "Expected a valid initializer expression");
+  E = E->IgnoreParenImpCasts();
+  if (const InitListExpr *ILE = dyn_cast<const InitListExpr>(E)) {
+    if (ILE->getNumInits() == 1)
+      E = dyn_cast<const CXXNewExpr>(ILE->getInit(0)->IgnoreParenImpCasts());
+  }
+
+  return dyn_cast_or_null<const CXXNewExpr>(E);
+}
+
+bool MismatchingNewDeleteDetector::hasMatchingNewInCtorInit(
+    const CXXCtorInitializer *CI) {
+  const CXXNewExpr *NE = nullptr;
+  if (Field == CI->getMember() &&
+      (NE = getNewExprFromInitListOrExpr(CI->getInit()))) {
+    if (NE->isArray() == IsArrayForm)
+      return true;
+    else
+      NewExprs.push_back(NE);
+  }
+  return false;
+}
+
+bool MismatchingNewDeleteDetector::hasMatchingNewInCtor(
+    const CXXConstructorDecl *CD) {
+  if (CD->isImplicit())
+    return false;
+  const FunctionDecl *Definition = CD;
+  if (!CD->isThisDeclarationADefinition() && !CD->isDefined(Definition)) {
+    HasUndefinedConstructors = true;
+    return EndOfTU;
+  }
+  for (const auto *CI : cast<const CXXConstructorDecl>(Definition)->inits()) {
+    if (hasMatchingNewInCtorInit(CI))
+      return true;
+  }
+  return false;
+}
+
+MismatchingNewDeleteDetector::MismatchResult
+MismatchingNewDeleteDetector::analyzeInClassInitializer() {
+  assert(Field != nullptr && "This should be called only for members");
+  if (const CXXNewExpr *NE =
+          getNewExprFromInitListOrExpr(Field->getInClassInitializer())) {
+    if (NE->isArray() != IsArrayForm) {
+      NewExprs.push_back(NE);
+      return MemberInitMismatches;
+    }
+  }
+  return NoMismatch;
+}
+
+MismatchingNewDeleteDetector::MismatchResult
+MismatchingNewDeleteDetector::analyzeField(FieldDecl *Field,
+                                           bool DeleteWasArrayForm) {
+  assert(Field != nullptr && "Analysis requires a valid class member.");
+  this->Field = Field;
+  IsArrayForm = DeleteWasArrayForm;
+  const CXXRecordDecl *RD = cast<const CXXRecordDecl>(Field->getParent());
+  for (const auto *CD : RD->ctors()) {
+    if (hasMatchingNewInCtor(CD))
+      return NoMismatch;
+  }
+  if (HasUndefinedConstructors)
+    return EndOfTU ? NoMismatch : AnalyzeLater;
+  if (!NewExprs.empty())
+    return MemberInitMismatches;
+  return Field->hasInClassInitializer() ? analyzeInClassInitializer()
+                                        : NoMismatch;
+}
+
+MismatchingNewDeleteDetector::MismatchResult
+MismatchingNewDeleteDetector::analyzeMemberExpr(const MemberExpr *ME) {
+  assert(ME != nullptr && "Expected a member expression");
+  if (FieldDecl *F = dyn_cast<FieldDecl>(ME->getMemberDecl()))
+    return analyzeField(F, IsArrayForm);
+  return NoMismatch;
+}
+
+bool MismatchingNewDeleteDetector::hasMatchingVarInit(const DeclRefExpr *D) {
+  const CXXNewExpr *NE = nullptr;
+  if (const VarDecl *VD = dyn_cast<const VarDecl>(D->getDecl())) {
+    if (VD->hasInit() && (NE = getNewExprFromInitListOrExpr(VD->getInit())) &&
+        NE->isArray() != IsArrayForm) {
+      NewExprs.push_back(NE);
+    }
+  }
+  return NewExprs.empty();
+}
+
+static void
+DiagnoseMismatchedNewDelete(Sema &SemaRef, SourceLocation DeleteLoc,
+                            const MismatchingNewDeleteDetector &Detector) {
+  SourceLocation EndOfDelete = SemaRef.getLocForEndOfToken(DeleteLoc);
+  FixItHint H;
+  if (!Detector.IsArrayForm)
+    H = FixItHint::CreateInsertion(EndOfDelete, "[]");
+  else {
+    SourceLocation RSquare = Lexer::findLocationAfterToken(
+        DeleteLoc, tok::l_square, SemaRef.getSourceManager(),
+        SemaRef.getLangOpts(), true);
+    if (RSquare.isValid())
+      H = FixItHint::CreateRemoval(SourceRange(EndOfDelete, RSquare));
+  }
+  SemaRef.Diag(DeleteLoc, diag::warn_mismatched_delete_new)
+      << Detector.IsArrayForm << H;
+
+  for (const auto *NE : Detector.NewExprs)
+    SemaRef.Diag(NE->getExprLoc(), diag::note_allocated_here)
+        << Detector.IsArrayForm;
+}
+
+void Sema::AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE) {
+  if (Diags.isIgnored(diag::warn_mismatched_delete_new, SourceLocation()))
+    return;
+  MismatchingNewDeleteDetector Detector(/*EndOfTU=*/false);
+  switch (Detector.analyzeDeleteExpr(DE)) {
+  case MismatchingNewDeleteDetector::VarInitMismatches:
+  case MismatchingNewDeleteDetector::MemberInitMismatches: {
+    DiagnoseMismatchedNewDelete(*this, DE->getLocStart(), Detector);
+    break;
+  }
+  case MismatchingNewDeleteDetector::AnalyzeLater: {
+    DeleteExprs[Detector.Field].push_back(
+        std::make_pair(DE->getLocStart(), DE->isArrayForm()));
+    break;
+  }
+  case MismatchingNewDeleteDetector::NoMismatch:
+    break;
+  }
+}
+
+void Sema::AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
+                                     bool DeleteWasArrayForm) {
+  MismatchingNewDeleteDetector Detector(/*EndOfTU=*/true);
+  switch (Detector.analyzeField(Field, DeleteWasArrayForm)) {
+  case MismatchingNewDeleteDetector::VarInitMismatches:
+    llvm_unreachable("This analysis should have been done for class members.");
+  case MismatchingNewDeleteDetector::AnalyzeLater:
+    llvm_unreachable("Analysis cannot be postponed any point beyond end of "
+                     "translation unit.");
+  case MismatchingNewDeleteDetector::MemberInitMismatches:
+    DiagnoseMismatchedNewDelete(*this, DeleteLoc, Detector);
+    break;
+  case MismatchingNewDeleteDetector::NoMismatch:
+    break;
+  }
+}
+
 /// ActOnCXXDelete - Parsed a C++ 'delete' expression (C++ 5.3.5), as in:
 /// @code ::delete ptr; @endcode
 /// or
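MismatchingNewDeleteDetector drives the mismatched new[]/delete warning added here: at the delete site it chases the pointer back to its initializer (variable initializer, constructor member-init list, or in-class initializer), compares array-ness, and defers to end-of-TU when constructor bodies are not yet visible. What it flags, with the fix-it hints built in DiagnoseMismatchedNewDelete (illustrative):

    // C++; both cases draw warn_mismatched_delete_new plus a note at the
    // allocation, with a fix-it inserting or removing "[]".
    struct S {
      int *m = new int[4];
      ~S() { delete m; }       // warning: 'delete' on array allocation
    };

    void f() {
      int *p = new int(0);
      delete[] p;              // warning: 'delete[]' on non-array allocation
    }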
] - if (Pointee->isArrayType() && !ArrayForm) { Diag(StartLoc, diag::warn_delete_array_type) << Type << Ex.get()->getSourceRange() @@ -2442,7 +2782,7 @@ Sema::ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal, DeleteName); MarkFunctionReferenced(StartLoc, OperatorDelete); - + // Check access and ambiguity of operator delete and destructor. if (PointeeRD) { if (CXXDestructorDecl *Dtor = LookupDestructor(PointeeRD)) { @@ -2452,9 +2792,11 @@ Sema::ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal, } } - return new (Context) CXXDeleteExpr( + CXXDeleteExpr *Result = new (Context) CXXDeleteExpr( Context.VoidTy, UseGlobal, ArrayForm, ArrayFormAsWritten, UsualArrayDeleteWantsSize, OperatorDelete, Ex.get(), StartLoc); + AnalyzeDeleteExprMismatch(Result); + return Result; } /// \brief Check the use of the given variable as a C++ condition in an if, @@ -2570,6 +2912,8 @@ static ExprResult BuildCXXCastArgument(Sema &S, S.CheckConstructorAccess(CastLoc, Constructor, InitializedEntity::InitializeTemporary(Ty), Constructor->getAccess()); + if (S.DiagnoseUseOfDecl(Method, CastLoc)) + return ExprError(); ExprResult Result = S.BuildCXXConstructExpr( CastLoc, Ty, cast<CXXConstructorDecl>(Method), @@ -2585,6 +2929,10 @@ static ExprResult BuildCXXCastArgument(Sema &S, case CK_UserDefinedConversion: { assert(!From->getType()->isPointerType() && "Arg can't have pointer type!"); + S.CheckMemberOperatorAccess(CastLoc, From, /*arg*/ nullptr, FoundDecl); + if (S.DiagnoseUseOfDecl(Method, CastLoc)) + return ExprError(); + // Create an implicit call expr that calls it. CXXConversionDecl *Conv = cast<CXXConversionDecl>(Method); ExprResult Result = S.BuildCXXMemberCallExpr(From, FoundDecl, Conv, @@ -2596,8 +2944,6 @@ static ExprResult BuildCXXCastArgument(Sema &S, CK_UserDefinedConversion, Result.get(), nullptr, Result.get()->getValueKind()); - S.CheckMemberOperatorAccess(CastLoc, From, /*arg*/ nullptr, FoundDecl); - return S.MaybeBindToTemporary(Result.get()); } } @@ -2628,7 +2974,7 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType, FunctionDecl *FD = ICS.UserDefined.ConversionFunction; CastKind CastKind; QualType BeforeToType; - assert(FD && "FIXME: aggregate initialization from init list"); + assert(FD && "no conversion function for user-defined conversion seq"); if (const CXXConversionDecl *Conv = dyn_cast<CXXConversionDecl>(FD)) { CastKind = CK_UserDefinedConversion; @@ -2989,8 +3335,18 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType, break; case ICK_Vector_Splat: - From = ImpCastExprToType(From, ToType, CK_VectorSplat, - VK_RValue, /*BasePath=*/nullptr, CCK).get(); + // Vector splat from any arithmetic type to a vector. + // Cast to the element type. + { + QualType elType = ToType->getAs<ExtVectorType>()->getElementType(); + if (elType != From->getType()) { + ExprResult E = From; + From = ImpCastExprToType(From, elType, + PrepareScalarCast(E, elType)).get(); + } + From = ImpCastExprToType(From, ToType, CK_VectorSplat, + VK_RValue, /*BasePath=*/nullptr, CCK).get(); + } break; case ICK_Complex_Real: @@ -3529,8 +3885,8 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, TypeTrait UTT, bool FoundConstructor = false; unsigned FoundTQs; - DeclContext::lookup_const_result R = Self.LookupConstructors(RD); - for (DeclContext::lookup_const_iterator Con = R.begin(), + DeclContext::lookup_result R = Self.LookupConstructors(RD); + for (DeclContext::lookup_iterator Con = R.begin(), ConEnd = R.end(); Con != ConEnd; ++Con) { // A template constructor is never a copy constructor. 
@@ -3529,8 +3885,8 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, TypeTrait UTT,
 
       bool FoundConstructor = false;
       unsigned FoundTQs;
-      DeclContext::lookup_const_result R = Self.LookupConstructors(RD);
-      for (DeclContext::lookup_const_iterator Con = R.begin(),
+      DeclContext::lookup_result R = Self.LookupConstructors(RD);
+      for (DeclContext::lookup_iterator Con = R.begin(),
            ConEnd = R.end(); Con != ConEnd; ++Con) {
         // A template constructor is never a copy constructor.
         // FIXME: However, it may actually be selected at the actual overload
@@ -3569,8 +3925,8 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, TypeTrait UTT,
         return true;
 
       bool FoundConstructor = false;
-      DeclContext::lookup_const_result R = Self.LookupConstructors(RD);
-      for (DeclContext::lookup_const_iterator Con = R.begin(),
+      DeclContext::lookup_result R = Self.LookupConstructors(RD);
+      for (DeclContext::lookup_iterator Con = R.begin(),
           ConEnd = R.end(); Con != ConEnd; ++Con) {
         // FIXME: In C++0x, a constructor template can be a default constructor.
         if (isa<FunctionTemplateDecl>(*Con))
@@ -5139,7 +5495,7 @@ ExprResult Sema::ActOnDecltypeExpression(Expr *E) {
     if (Call == TopCall)
       continue;
 
-    if (CheckCallReturnType(Call->getCallReturnType(),
+    if (CheckCallReturnType(Call->getCallReturnType(Context),
                             Call->getLocStart(),
                             Call, Call->getDirectCallee()))
       return ExprError();
@@ -5202,10 +5558,11 @@ static void noteOperatorArrows(Sema &S,
   }
 }
 
-ExprResult
-Sema::ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc,
-                                   tok::TokenKind OpKind, ParsedType &ObjectType,
-                                   bool &MayBePseudoDestructor) {
+ExprResult Sema::ActOnStartCXXMemberReference(Scope *S, Expr *Base,
+                                              SourceLocation OpLoc,
+                                              tok::TokenKind OpKind,
+                                              ParsedType &ObjectType,
+                                              bool &MayBePseudoDestructor) {
   // Since this might be a postfix expression, get rid of ParenListExprs.
   ExprResult Result = MaybeConvertParenListExprToParenExpr(S, Base);
   if (Result.isInvalid()) return ExprError();
@@ -5339,20 +5696,6 @@ Sema::ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc,
   return Base;
 }
 
-ExprResult Sema::DiagnoseDtorReference(SourceLocation NameLoc,
-                                       Expr *MemExpr) {
-  SourceLocation ExpectedLParenLoc = PP.getLocForEndOfToken(NameLoc);
-  Diag(MemExpr->getLocStart(), diag::err_dtor_expr_without_call)
-    << isa<CXXPseudoDestructorExpr>(MemExpr)
-    << FixItHint::CreateInsertion(ExpectedLParenLoc, "()");
-
-  return ActOnCallExpr(/*Scope*/ nullptr,
-                       MemExpr,
-                       /*LPLoc*/ ExpectedLParenLoc,
-                       None,
-                       /*RPLoc*/ ExpectedLParenLoc);
-}
-
 static bool CheckArrow(Sema& S, QualType& ObjectType, Expr *&Base,
                    tok::TokenKind& OpKind, SourceLocation OpLoc) {
   if (Base->hasPlaceholderType()) {
@@ -5393,8 +5736,7 @@ ExprResult Sema::BuildPseudoDestructorExpr(Expr *Base,
                                            TypeSourceInfo *ScopeTypeInfo,
                                            SourceLocation CCLoc,
                                            SourceLocation TildeLoc,
-                                         PseudoDestructorTypeStorage Destructed,
-                                           bool HasTrailingLParen) {
+                                        PseudoDestructorTypeStorage Destructed) {
   TypeSourceInfo *DestructedTypeInfo = Destructed.getTypeSourceInfo();
 
   QualType ObjectType;
@@ -5482,10 +5824,7 @@ ExprResult Sema::BuildPseudoDestructorExpr(Expr *Base,
                                            TildeLoc, Destructed);
 
-  if (HasTrailingLParen)
-    return Result;
-
-  return DiagnoseDtorReference(Destructed.getLocation(), Result);
+  return Result;
 }
 
 ExprResult Sema::ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
@@ -5495,8 +5834,7 @@ ExprResult Sema::ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
                                            UnqualifiedId &FirstTypeName,
                                            SourceLocation CCLoc,
                                            SourceLocation TildeLoc,
-                                           UnqualifiedId &SecondTypeName,
-                                           bool HasTrailingLParen) {
+                                           UnqualifiedId &SecondTypeName) {
   assert((FirstTypeName.getKind() == UnqualifiedId::IK_TemplateId ||
           FirstTypeName.getKind() == UnqualifiedId::IK_Identifier) &&
          "Invalid first type name in pseudo-destructor");
@@ -5623,15 +5961,14 @@ ExprResult Sema::ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
 
   return BuildPseudoDestructorExpr(Base, OpLoc, OpKind, SS,
                                    ScopeTypeInfo, CCLoc, TildeLoc,
-                                   Destructed, HasTrailingLParen);
+                                   Destructed);
 }
 
 ExprResult
 Sema::ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
                                 SourceLocation OpLoc,
                                 tok::TokenKind OpKind,
                                 SourceLocation TildeLoc,
-                                const DeclSpec& DS,
-                                bool HasTrailingLParen) {
+                                const DeclSpec& DS) {
   QualType ObjectType;
   if (CheckArrow(*this, ObjectType, Base, OpKind, OpLoc))
     return ExprError();
@@ -5647,7 +5984,7 @@ ExprResult Sema::ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
 
   return BuildPseudoDestructorExpr(Base, OpLoc, OpKind, CXXScopeSpec(),
                                    nullptr, SourceLocation(), TildeLoc,
-                                   Destructed, HasTrailingLParen);
+                                   Destructed);
 }
 
 ExprResult Sema::BuildCXXMemberCallExpr(Expr *E, NamedDecl *FoundDecl,
@@ -5685,10 +6022,9 @@ ExprResult Sema::BuildCXXMemberCallExpr(Expr *E, NamedDecl *FoundDecl,
     if (Exp.isInvalid())
       return true;
 
-    MemberExpr *ME =
-        new (Context) MemberExpr(Exp.get(), /*IsArrow=*/false, Method,
-                                 SourceLocation(), Context.BoundMemberTy,
-                                 VK_RValue, OK_Ordinary);
+    MemberExpr *ME = new (Context) MemberExpr(
+        Exp.get(), /*IsArrow=*/false, SourceLocation(), Method, SourceLocation(),
+        Context.BoundMemberTy, VK_RValue, OK_Ordinary);
     if (HadMultipleCandidates)
       ME->setHadMultipleCandidates(true);
     MarkMemberReferenced(ME);
@@ -6052,6 +6388,8 @@ public:
 class TransformTypos : public TreeTransform<TransformTypos> {
   typedef TreeTransform<TransformTypos> BaseTransform;
 
+  VarDecl *InitDecl; // A decl to avoid as a correction because it is in the
+                     // process of being initialized.
   llvm::function_ref<ExprResult(Expr *)> ExprFilter;
   llvm::SmallSetVector<TypoExpr *, 2> TypoExprs, AmbiguousTypoExprs;
   llvm::SmallDenseMap<TypoExpr *, ExprResult, 2> TransformCache;
@@ -6130,8 +6468,8 @@ class TransformTypos : public TreeTransform<TransformTypos> {
   }
 
 public:
-  TransformTypos(Sema &SemaRef, llvm::function_ref<ExprResult(Expr *)> Filter)
-      : BaseTransform(SemaRef), ExprFilter(Filter) {}
+  TransformTypos(Sema &SemaRef, VarDecl *InitDecl, llvm::function_ref<ExprResult(Expr *)> Filter)
+      : BaseTransform(SemaRef), InitDecl(InitDecl), ExprFilter(Filter) {}
 
   ExprResult RebuildCallExpr(Expr *Callee, SourceLocation LParenLoc,
                                    MultiExprArg Args,
@@ -6210,6 +6548,8 @@ public:
     // For the first TypoExpr and an uncached TypoExpr, find the next likely
     // typo correction and return it.
     while (TypoCorrection TC = State.Consumer->getNextCorrection()) {
+      if (InitDecl && TC.getCorrectionDecl() == InitDecl)
+        continue;
       ExprResult NE = State.RecoveryHandler ?
           State.RecoveryHandler(SemaRef, E, TC) :
           attemptRecovery(SemaRef, *State.Consumer, TC);
@@ -6234,8 +6574,9 @@ public:
 };
 }
 
-ExprResult Sema::CorrectDelayedTyposInExpr(
-    Expr *E, llvm::function_ref<ExprResult(Expr *)> Filter) {
+ExprResult
+Sema::CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl,
+                                llvm::function_ref<ExprResult(Expr *)> Filter) {
   // If the current evaluation context indicates there are uncorrected typos
   // and the current expression isn't guaranteed to not have typos, try to
   // resolve any TypoExpr nodes that might be in the expression.
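Threading InitDecl through TransformTypos stops typo recovery from offering the very variable being initialized as a correction, which would otherwise manufacture a self-initialization. The situation it guards against (illustrative):

    // C++; 'flag' is in scope inside its own initializer, and without the
    // InitDecl check it could be suggested as the correction for 'falg',
    // turning the declaration into 'int flag = flag;'.
    void f() {
      int flag = falg; // error: use of undeclared identifier 'falg';
                       // 'flag' itself is skipped as a candidate
    }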
@@ -6246,7 +6587,7 @@ ExprResult Sema::CorrectDelayedTyposInExpr( assert(TyposInContext < ~0U && "Recursive call of CorrectDelayedTyposInExpr"); ExprEvalContexts.back().NumTypos = ~0U; auto TyposResolved = DelayedTypos.size(); - auto Result = TransformTypos(*this, Filter).Transform(E); + auto Result = TransformTypos(*this, InitDecl, Filter).Transform(E); ExprEvalContexts.back().NumTypos = TyposInContext; TyposResolved -= DelayedTypos.size(); if (Result.isInvalid() || Result.get() != E) { diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaExprMember.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaExprMember.cpp index af1cf90..e421349 100644 --- a/contrib/llvm/tools/clang/lib/Sema/SemaExprMember.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/SemaExprMember.cpp @@ -109,9 +109,8 @@ static IMAKind ClassifyImplicitMemberAccess(Sema &SemaRef, NamedDecl *D = *I; if (D->isCXXInstanceMember()) { - if (dyn_cast<FieldDecl>(D) || dyn_cast<MSPropertyDecl>(D) - || dyn_cast<IndirectFieldDecl>(D)) - isField = true; + isField |= isa<FieldDecl>(D) || isa<MSPropertyDecl>(D) || + isa<IndirectFieldDecl>(D); CXXRecordDecl *R = cast<CXXRecordDecl>(D->getDeclContext()); Classes.insert(R->getCanonicalDecl()); @@ -732,8 +731,8 @@ Sema::BuildMemberReferenceExpr(Expr *Base, QualType BaseType, static ExprResult BuildFieldReferenceExpr(Sema &S, Expr *BaseExpr, bool IsArrow, - const CXXScopeSpec &SS, FieldDecl *Field, - DeclAccessPair FoundDecl, + SourceLocation OpLoc, const CXXScopeSpec &SS, + FieldDecl *Field, DeclAccessPair FoundDecl, const DeclarationNameInfo &MemberNameInfo); ExprResult @@ -820,10 +819,10 @@ Sema::BuildAnonymousStructUnionMemberReference(const CXXScopeSpec &SS, // Make a nameInfo that properly uses the anonymous name. DeclarationNameInfo memberNameInfo(field->getDeclName(), loc); - + result = BuildFieldReferenceExpr(*this, result, baseObjectIsPointer, - EmptySS, field, foundDecl, - memberNameInfo).get(); + SourceLocation(), EmptySS, field, + foundDecl, memberNameInfo).get(); if (!result) return ExprError(); @@ -841,9 +840,10 @@ Sema::BuildAnonymousStructUnionMemberReference(const CXXScopeSpec &SS, DeclAccessPair fakeFoundDecl = DeclAccessPair::make(field, field->getAccess()); - result = BuildFieldReferenceExpr(*this, result, /*isarrow*/ false, - (FI == FEnd? SS : EmptySS), field, - fakeFoundDecl, memberNameInfo).get(); + result = + BuildFieldReferenceExpr(*this, result, /*isarrow*/ false, + SourceLocation(), (FI == FEnd ? SS : EmptySS), + field, fakeFoundDecl, memberNameInfo).get(); } return result; @@ -863,18 +863,16 @@ BuildMSPropertyRefExpr(Sema &S, Expr *BaseExpr, bool IsArrow, } /// \brief Build a MemberExpr AST node. 
-static MemberExpr * -BuildMemberExpr(Sema &SemaRef, ASTContext &C, Expr *Base, bool isArrow, - const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, - ValueDecl *Member, DeclAccessPair FoundDecl, - const DeclarationNameInfo &MemberNameInfo, QualType Ty, - ExprValueKind VK, ExprObjectKind OK, - const TemplateArgumentListInfo *TemplateArgs = nullptr) { +static MemberExpr *BuildMemberExpr( + Sema &SemaRef, ASTContext &C, Expr *Base, bool isArrow, + SourceLocation OpLoc, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, + ValueDecl *Member, DeclAccessPair FoundDecl, + const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, + ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr) { assert((!isArrow || Base->isRValue()) && "-> base must be a pointer rvalue"); - MemberExpr *E = - MemberExpr::Create(C, Base, isArrow, SS.getWithLocInContext(C), - TemplateKWLoc, Member, FoundDecl, MemberNameInfo, - TemplateArgs, Ty, VK, OK); + MemberExpr *E = MemberExpr::Create( + C, Base, isArrow, OpLoc, SS.getWithLocInContext(C), TemplateKWLoc, Member, + FoundDecl, MemberNameInfo, TemplateArgs, Ty, VK, OK); SemaRef.MarkMemberReferenced(E); return E; } @@ -969,8 +967,7 @@ Sema::BuildMemberReferenceExpr(Expr *BaseExpr, QualType BaseExprType, CXXScopeSpec TempSS(SS); RetryExpr = ActOnMemberAccessExpr( ExtraArgs->S, RetryExpr.get(), OpLoc, tok::arrow, TempSS, - TemplateKWLoc, ExtraArgs->Id, ExtraArgs->ObjCImpDecl, - ExtraArgs->HasTrailingLParen); + TemplateKWLoc, ExtraArgs->Id, ExtraArgs->ObjCImpDecl); } if (Trap.hasErrorOccurred()) RetryExpr = ExprError(); @@ -1058,8 +1055,8 @@ Sema::BuildMemberReferenceExpr(Expr *BaseExpr, QualType BaseExprType, return ExprError(); if (FieldDecl *FD = dyn_cast<FieldDecl>(MemberDecl)) - return BuildFieldReferenceExpr(*this, BaseExpr, IsArrow, - SS, FD, FoundDecl, MemberNameInfo); + return BuildFieldReferenceExpr(*this, BaseExpr, IsArrow, OpLoc, SS, FD, + FoundDecl, MemberNameInfo); if (MSPropertyDecl *PD = dyn_cast<MSPropertyDecl>(MemberDecl)) return BuildMSPropertyRefExpr(*this, BaseExpr, IsArrow, SS, PD, @@ -1073,8 +1070,8 @@ Sema::BuildMemberReferenceExpr(Expr *BaseExpr, QualType BaseExprType, OpLoc); if (VarDecl *Var = dyn_cast<VarDecl>(MemberDecl)) { - return BuildMemberExpr(*this, Context, BaseExpr, IsArrow, SS, TemplateKWLoc, - Var, FoundDecl, MemberNameInfo, + return BuildMemberExpr(*this, Context, BaseExpr, IsArrow, OpLoc, SS, + TemplateKWLoc, Var, FoundDecl, MemberNameInfo, Var->getType().getNonReferenceType(), VK_LValue, OK_Ordinary); } @@ -1090,16 +1087,16 @@ Sema::BuildMemberReferenceExpr(Expr *BaseExpr, QualType BaseExprType, type = MemberFn->getType(); } - return BuildMemberExpr(*this, Context, BaseExpr, IsArrow, SS, TemplateKWLoc, - MemberFn, FoundDecl, MemberNameInfo, type, valueKind, - OK_Ordinary); + return BuildMemberExpr(*this, Context, BaseExpr, IsArrow, OpLoc, SS, + TemplateKWLoc, MemberFn, FoundDecl, MemberNameInfo, + type, valueKind, OK_Ordinary); } assert(!isa<FunctionDecl>(MemberDecl) && "member function not C++ method?"); if (EnumConstantDecl *Enum = dyn_cast<EnumConstantDecl>(MemberDecl)) { - return BuildMemberExpr(*this, Context, BaseExpr, IsArrow, SS, TemplateKWLoc, - Enum, FoundDecl, MemberNameInfo, Enum->getType(), - VK_RValue, OK_Ordinary); + return BuildMemberExpr(*this, Context, BaseExpr, IsArrow, OpLoc, SS, + TemplateKWLoc, Enum, FoundDecl, MemberNameInfo, + Enum->getType(), VK_RValue, OK_Ordinary); } // We found something that we didn't expect. Complain. 
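With the member-access operator location now passed into BuildMemberExpr and stored on the node, clients can point at the '.' or '->' token directly instead of reconstructing it from the base and member locations. A minimal consumer sketch, assuming the getOperatorLoc() accessor that accompanies this change on MemberExpr:

    #include "clang/AST/Expr.h"
    #include "clang/Basic/SourceManager.h"
    #include "llvm/Support/raw_ostream.h"

    // Report exactly where the '.' or '->' occurs in the source.
    static void dumpMemberOp(const clang::MemberExpr *ME,
                             const clang::SourceManager &SM) {
      llvm::errs() << (ME->isArrow() ? "->" : ".") << " at "
                   << ME->getOperatorLoc().printToString(SM) << "\n";
    }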
@@ -1521,7 +1518,15 @@ static ExprResult LookupMemberExpr(Sema &S, LookupResult &R, if (BaseType->isExtVectorType()) { // FIXME: this expr should store IsArrow. IdentifierInfo *Member = MemberName.getAsIdentifierInfo(); - ExprValueKind VK = (IsArrow ? VK_LValue : BaseExpr.get()->getValueKind()); + ExprValueKind VK; + if (IsArrow) + VK = VK_LValue; + else { + if (PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(BaseExpr.get())) + VK = POE->getSyntacticForm()->getValueKind(); + else + VK = BaseExpr.get()->getValueKind(); + } QualType ret = CheckExtVectorComponent(S, BaseType, VK, OpLoc, Member, MemberLoc); if (ret.isNull()) @@ -1591,9 +1596,6 @@ static ExprResult LookupMemberExpr(Sema &S, LookupResult &R, /// possibilities, including destructor and operator references. /// /// \param OpKind either tok::arrow or tok::period -/// \param HasTrailingLParen whether the next token is '(', which -/// is used to diagnose mis-uses of special members that can -/// only be called /// \param ObjCImpDecl the current Objective-C \@implementation /// decl; this is an ugly hack around the fact that Objective-C /// \@implementations aren't properly put in the context chain @@ -1603,24 +1605,10 @@ ExprResult Sema::ActOnMemberAccessExpr(Scope *S, Expr *Base, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, - Decl *ObjCImpDecl, - bool HasTrailingLParen) { + Decl *ObjCImpDecl) { if (SS.isSet() && SS.isInvalid()) return ExprError(); - // The only way a reference to a destructor can be used is to - // immediately call it. If the next token is not a '(', produce - // a diagnostic and build the call now. - if (!HasTrailingLParen && - Id.getKind() == UnqualifiedId::IK_DestructorName) { - ExprResult DtorAccess = - ActOnMemberAccessExpr(S, Base, OpLoc, OpKind, SS, TemplateKWLoc, Id, - ObjCImpDecl, /*HasTrailingLParen*/true); - if (DtorAccess.isInvalid()) - return DtorAccess; - return DiagnoseDtorReference(Id.getLocStart(), DtorAccess.get()); - } - // Warn about the explicit constructor calls Microsoft extension. if (getLangOpts().MicrosoftExt && Id.getKind() == UnqualifiedId::IK_ConstructorName) @@ -1653,8 +1641,7 @@ ExprResult Sema::ActOnMemberAccessExpr(Scope *S, Expr *Base, NameInfo, TemplateArgs); } - ActOnMemberAccessExtraArgs ExtraArgs = {S, Id, ObjCImpDecl, - HasTrailingLParen}; + ActOnMemberAccessExtraArgs ExtraArgs = {S, Id, ObjCImpDecl}; return BuildMemberReferenceExpr(Base, Base->getType(), OpLoc, IsArrow, SS, TemplateKWLoc, FirstQualifierInScope, NameInfo, TemplateArgs, &ExtraArgs); @@ -1662,8 +1649,8 @@ ExprResult Sema::ActOnMemberAccessExpr(Scope *S, Expr *Base, static ExprResult BuildFieldReferenceExpr(Sema &S, Expr *BaseExpr, bool IsArrow, - const CXXScopeSpec &SS, FieldDecl *Field, - DeclAccessPair FoundDecl, + SourceLocation OpLoc, const CXXScopeSpec &SS, + FieldDecl *Field, DeclAccessPair FoundDecl, const DeclarationNameInfo &MemberNameInfo) { // x.a is an l-value if 'a' has a reference type. 
Otherwise: // x.a is an l-value/x-value/pr-value if the base is (and note @@ -1716,7 +1703,7 @@ BuildFieldReferenceExpr(Sema &S, Expr *BaseExpr, bool IsArrow, FoundDecl, Field); if (Base.isInvalid()) return ExprError(); - return BuildMemberExpr(S, S.Context, Base.get(), IsArrow, SS, + return BuildMemberExpr(S, S.Context, Base.get(), IsArrow, OpLoc, SS, /*TemplateKWLoc=*/SourceLocation(), Field, FoundDecl, MemberNameInfo, MemberType, VK, OK); } diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaExprObjC.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaExprObjC.cpp index 9c3b51c..63b7485 100644 --- a/contrib/llvm/tools/clang/lib/Sema/SemaExprObjC.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/SemaExprObjC.cpp @@ -218,7 +218,9 @@ static ObjCMethodDecl *getNSNumberFactoryMethod(Sema &S, SourceLocation Loc, S.Diag(Loc, diag::err_undeclared_nsnumber); return nullptr; } - + } + + if (S.NSNumberPointer.isNull()) { // generate the pointer to NSNumber type. QualType NSNumberObject = CX.getObjCInterfaceType(S.NSNumberDecl); S.NSNumberPointer = CX.getObjCObjectPointerType(NSNumberObject); @@ -1041,7 +1043,7 @@ ExprResult Sema::ParseObjCSelectorExpression(Selector Sel, SourceLocation RParenLoc, bool WarnMultipleSelectors) { ObjCMethodDecl *Method = LookupInstanceMethodInGlobalPool(Sel, - SourceRange(LParenLoc, RParenLoc), false, false); + SourceRange(LParenLoc, RParenLoc)); if (!Method) Method = LookupFactoryMethodInGlobalPool(Sel, SourceRange(LParenLoc, RParenLoc)); @@ -1059,15 +1061,11 @@ ExprResult Sema::ParseObjCSelectorExpression(Selector Sel, } else DiagnoseMismatchedSelectors(*this, AtLoc, Method, LParenLoc, RParenLoc, WarnMultipleSelectors); - + if (Method && Method->getImplementationControl() != ObjCMethodDecl::Optional && - !getSourceManager().isInSystemHeader(Method->getLocation())) { - llvm::DenseMap<Selector, SourceLocation>::iterator Pos - = ReferencedSelectors.find(Sel); - if (Pos == ReferencedSelectors.end()) - ReferencedSelectors.insert(std::make_pair(Sel, AtLoc)); - } + !getSourceManager().isInSystemHeader(Method->getLocation())) + ReferencedSelectors.insert(std::make_pair(Sel, AtLoc)); // In ARC, forbid the user from using @selector for // retain/release/autorelease/dealloc/retainCount. @@ -1507,64 +1505,6 @@ ObjCMethodDecl *Sema::LookupMethodInQualifiedType(Selector Sel, return nullptr; } -static void DiagnoseARCUseOfWeakReceiver(Sema &S, Expr *Receiver) { - if (!Receiver) - return; - - if (OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(Receiver)) - Receiver = OVE->getSourceExpr(); - - Expr *RExpr = Receiver->IgnoreParenImpCasts(); - SourceLocation Loc = RExpr->getLocStart(); - QualType T = RExpr->getType(); - const ObjCPropertyDecl *PDecl = nullptr; - const ObjCMethodDecl *GDecl = nullptr; - if (PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(RExpr)) { - RExpr = POE->getSyntacticForm(); - if (ObjCPropertyRefExpr *PRE = dyn_cast<ObjCPropertyRefExpr>(RExpr)) { - if (PRE->isImplicitProperty()) { - GDecl = PRE->getImplicitPropertyGetter(); - if (GDecl) { - T = GDecl->getReturnType(); - } - } - else { - PDecl = PRE->getExplicitProperty(); - if (PDecl) { - T = PDecl->getType(); - } - } - } - } - else if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(RExpr)) { - // See if receiver is a method which envokes a synthesized getter - // backing a 'weak' property. 
- ObjCMethodDecl *Method = ME->getMethodDecl(); - if (Method && Method->getSelector().getNumArgs() == 0) { - PDecl = Method->findPropertyDecl(); - if (PDecl) - T = PDecl->getType(); - } - } - - if (T.getObjCLifetime() != Qualifiers::OCL_Weak) { - if (!PDecl) - return; - if (!(PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_weak)) - return; - } - - S.Diag(Loc, diag::warn_receiver_is_weak) - << ((!PDecl && !GDecl) ? 0 : (PDecl ? 1 : 2)); - - if (PDecl) - S.Diag(PDecl->getLocation(), diag::note_property_declare); - else if (GDecl) - S.Diag(GDecl->getLocation(), diag::note_method_declared_at) << GDecl; - - S.Diag(Loc, diag::note_arc_assign_to_strong); -} - /// HandleExprPropertyRefExpr - Handle foo.bar where foo is a pointer to an /// objective C interface. This is a property reference expression. ExprResult Sema:: @@ -1751,29 +1691,30 @@ ActOnClassPropertyRefExpr(IdentifierInfo &receiverName, IsSuper = true; if (ObjCMethodDecl *CurMethod = tryCaptureObjCSelf(receiverNameLoc)) { - if (CurMethod->isInstanceMethod()) { - ObjCInterfaceDecl *Super = - CurMethod->getClassInterface()->getSuperClass(); - if (!Super) { - // The current class does not have a superclass. - Diag(receiverNameLoc, diag::error_root_class_cannot_use_super) - << CurMethod->getClassInterface()->getIdentifier(); - return ExprError(); + if (ObjCInterfaceDecl *Class = CurMethod->getClassInterface()) { + if (CurMethod->isInstanceMethod()) { + ObjCInterfaceDecl *Super = Class->getSuperClass(); + if (!Super) { + // The current class does not have a superclass. + Diag(receiverNameLoc, diag::error_root_class_cannot_use_super) + << Class->getIdentifier(); + return ExprError(); + } + QualType T = Context.getObjCInterfaceType(Super); + T = Context.getObjCObjectPointerType(T); + + return HandleExprPropertyRefExpr(T->getAsObjCInterfacePointerType(), + /*BaseExpr*/nullptr, + SourceLocation()/*OpLoc*/, + &propertyName, + propertyNameLoc, + receiverNameLoc, T, true); } - QualType T = Context.getObjCInterfaceType(Super); - T = Context.getObjCObjectPointerType(T); - - return HandleExprPropertyRefExpr(T->getAsObjCInterfacePointerType(), - /*BaseExpr*/nullptr, - SourceLocation()/*OpLoc*/, - &propertyName, - propertyNameLoc, - receiverNameLoc, T, true); - } - // Otherwise, if this is a class method, try dispatching to our - // superclass. - IFace = CurMethod->getClassInterface()->getSuperClass(); + // Otherwise, if this is a class method, try dispatching to our + // superclass. + IFace = Class->getSuperClass(); + } } } @@ -2452,8 +2393,11 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver, if (ObjCMethodDecl *BestMethod = SelectBestMethod(Sel, ArgsIn, Method->isInstanceMethod())) Method = BestMethod; - if (!AreMultipleMethodsInGlobalPool(Sel, Method->isInstanceMethod())) + if (!AreMultipleMethodsInGlobalPool(Sel, Method, + SourceRange(LBracLoc, RBracLoc), + receiverIsId)) { DiagnoseUseOfDecl(Method, SelLoc); + } } } else if (ReceiverType->isObjCClassType() || ReceiverType->isObjCQualifiedClassType()) { @@ -2491,14 +2435,12 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver, // If not messaging 'self', look for any factory method named 'Sel'. if (!Receiver || !isSelfExpr(Receiver)) { Method = LookupFactoryMethodInGlobalPool(Sel, - SourceRange(LBracLoc, RBracLoc), - true); + SourceRange(LBracLoc, RBracLoc)); if (!Method) { // If no class (factory) method was found, check if an _instance_ // method of the same name exists in the root class only. 
Method = LookupInstanceMethodInGlobalPool(Sel, - SourceRange(LBracLoc, RBracLoc), - true); + SourceRange(LBracLoc, RBracLoc)); if (Method) if (const ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(Method->getDeclContext())) { @@ -2575,6 +2517,14 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver, if (OCIType->qual_empty()) { Method = LookupInstanceMethodInGlobalPool(Sel, SourceRange(LBracLoc, RBracLoc)); + if (Method) { + if (auto BestMethod = + SelectBestMethod(Sel, ArgsIn, Method->isInstanceMethod())) + Method = BestMethod; + AreMultipleMethodsInGlobalPool(Sel, Method, + SourceRange(LBracLoc, RBracLoc), + true); + } if (Method && !forwardClass) Diag(SelLoc, diag::warn_maynot_respond) << OCIType->getInterfaceDecl()->getIdentifier() @@ -2757,15 +2707,6 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver, } if (getLangOpts().ObjCAutoRefCount) { - // Do not warn about IBOutlet weak property receivers being set to null - // as this cannot asynchronously happen. - bool WarnWeakReceiver = true; - if (isImplicit && Method) - if (const ObjCPropertyDecl *PropertyDecl = Method->findPropertyDecl()) - WarnWeakReceiver = !PropertyDecl->hasAttr<IBOutletAttr>(); - if (WarnWeakReceiver) - DiagnoseARCUseOfWeakReceiver(*this, Receiver); - // In ARC, annotate delegate init calls. if (Result->getMethodFamily() == OMF_init && (SuperLoc.isValid() || isSelfExpr(Receiver))) { @@ -2796,7 +2737,9 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver, } } } - + + CheckObjCCircularContainer(Result); + return MaybeBindToTemporary(Result); } @@ -2805,8 +2748,7 @@ static void RemoveSelectorFromWarningCache(Sema &S, Expr* Arg) { dyn_cast<ObjCSelectorExpr>(Arg->IgnoreParenCasts())) { Selector Sel = OSE->getSelector(); SourceLocation Loc = OSE->getAtLoc(); - llvm::DenseMap<Selector, SourceLocation>::iterator Pos - = S.ReferencedSelectors.find(Sel); + auto Pos = S.ReferencedSelectors.find(Sel); if (Pos != S.ReferencedSelectors.end() && Pos->second == Loc) S.ReferencedSelectors.erase(Pos); } @@ -3028,17 +2970,20 @@ namespace { /// Some declaration references are okay. ACCResult VisitDeclRefExpr(DeclRefExpr *e) { - // References to global constants from system headers are okay. - // These are things like 'kCFStringTransformToLatin'. They are - // can also be assumed to be immune to retains. VarDecl *var = dyn_cast<VarDecl>(e->getDecl()); + // References to global constants are okay. if (isAnyRetainable(TargetClass) && isAnyRetainable(SourceClass) && var && var->getStorageClass() == SC_Extern && - var->getType().isConstQualified() && - Context.getSourceManager().isInSystemHeader(var->getLocation())) { - return ACC_bottom; + var->getType().isConstQualified()) { + + // In system headers, they can also be assumed to be immune to retains. + // These are things like 'kCFStringTransformToLatin'. + if (Context.getSourceManager().isInSystemHeader(var->getLocation())) + return ACC_bottom; + + return ACC_plusZero; } // Nothing else. 
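The VisitDeclRefExpr hunk just above relaxes the old rule: any extern const global is now an acceptable +0 source for an ARC bridged cast, while only the system-header case keeps the stronger "immune to retains" answer. A standalone restatement of just that decision (a sketch under simplified assumptions; the real checker first tests isAnyRetainable on both cast classes, and its ACCResult enum has more values):

    #include "clang/AST/Decl.h"
    #include "clang/Basic/SourceManager.h"

    enum ACCResult { ACC_invalid, ACC_bottom, ACC_plusZero };  // abridged

    ACCResult classifyGlobalConstant(const clang::VarDecl *Var,
                                     const clang::SourceManager &SM) {
      if (!Var || Var->getStorageClass() != clang::SC_Extern ||
          !Var->getType().isConstQualified())
        return ACC_invalid;                  // not an extern const global
      if (SM.isInSystemHeader(Var->getLocation()))
        return ACC_bottom;                   // e.g. kCFStringTransformToLatin
      return ACC_plusZero;                   // new: accepted at +0 elsewhere
    }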
@@ -3421,7 +3366,7 @@ static bool CheckObjCBridgeNSCast(Sema &S, QualType castType, Expr *castExpr, ObjCInterfaceDecl *CastClass = InterfacePointerType->getObjectType()->getInterface(); if ((CastClass == ExprClass) || - (CastClass && ExprClass->isSuperClassOf(CastClass))) + (CastClass && CastClass->isSuperClassOf(ExprClass))) return true; if (warn) S.Diag(castExpr->getLocStart(), diag::warn_objc_invalid_bridge) @@ -3444,12 +3389,13 @@ static bool CheckObjCBridgeNSCast(Sema &S, QualType castType, Expr *castExpr, return false; } } + } else if (!castType->isObjCIdType()) { + S.Diag(castExpr->getLocStart(), diag::err_objc_cf_bridged_not_interface) + << castExpr->getType() << Parm; + S.Diag(TDNDecl->getLocStart(), diag::note_declared_at); + if (Target) + S.Diag(Target->getLocStart(), diag::note_declared_at); } - S.Diag(castExpr->getLocStart(), diag::err_objc_cf_bridged_not_interface) - << castExpr->getType() << Parm; - S.Diag(TDNDecl->getLocStart(), diag::note_declared_at); - if (Target) - S.Diag(Target->getLocStart(), diag::note_declared_at); return true; } return false; @@ -3469,6 +3415,9 @@ static bool CheckObjCBridgeCFCast(Sema &S, QualType castType, Expr *castExpr, if (TB *ObjCBAttr = getObjCBridgeAttr<TB>(TD)) { if (IdentifierInfo *Parm = ObjCBAttr->getBridgedType()) { HadTheAttribute = true; + if (Parm->isStr("id")) + return true; + NamedDecl *Target = nullptr; // Check for an existing type with this name. LookupResult R(S, DeclarationName(Parm), SourceLocation(), diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaFixItUtils.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaFixItUtils.cpp index 32b56bc..2e327ec 100644 --- a/contrib/llvm/tools/clang/lib/Sema/SemaFixItUtils.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/SemaFixItUtils.cpp @@ -161,11 +161,8 @@ bool ConversionFixItGenerator::tryToFixConversion(const Expr *FullExpr, } static bool isMacroDefined(const Sema &S, SourceLocation Loc, StringRef Name) { - const IdentifierInfo *II = &S.getASTContext().Idents.get(Name); - if (!II->hadMacroDefinition()) return false; - - MacroDirective *Macro = S.PP.getMacroDirectiveHistory(II); - return Macro && Macro->findDirectiveAtLoc(Loc, S.getSourceManager()); + return (bool)S.PP.getMacroDefinitionAtLoc(&S.getASTContext().Idents.get(Name), + Loc); } static std::string getScalarZeroExpressionForType( diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaInit.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaInit.cpp index 2ad6754..610e0a9 100644 --- a/contrib/llvm/tools/clang/lib/Sema/SemaInit.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/SemaInit.cpp @@ -150,7 +150,7 @@ static void CheckStringInit(Expr *Str, QualType &DeclT, const ArrayType *AT, Sema &S) { // Get the length of the string as parsed. auto *ConstantArrayTy = - cast<ConstantArrayType>(Str->getType()->getUnqualifiedDesugaredType()); + cast<ConstantArrayType>(Str->getType()->getAsArrayTypeUnsafe()); uint64_t StrLength = ConstantArrayTy->getSize().getZExtValue(); if (const IncompleteArrayType *IAT = dyn_cast<IncompleteArrayType>(AT)) { @@ -640,6 +640,9 @@ InitListChecker::InitListChecker(Sema &S, const InitializedEntity &Entity, InitListExpr *IL, QualType &T, bool VerifyOnly) : SemaRef(S), VerifyOnly(VerifyOnly) { + // FIXME: Check that IL isn't already the semantic form of some other + // InitListExpr. If it is, we'd create a broken AST. 
+ hadError = false; FullyStructuredList = @@ -751,6 +754,68 @@ void InitListChecker::CheckImplicitInitList(const InitializedEntity &Entity, } } +/// Warn that \p Entity was of scalar type and was initialized by a +/// single-element braced initializer list. +static void warnBracedScalarInit(Sema &S, const InitializedEntity &Entity, + SourceRange Braces) { + // Don't warn during template instantiation. If the initialization was + // non-dependent, we warned during the initial parse; otherwise, the + // type might not be scalar in some uses of the template. + if (!S.ActiveTemplateInstantiations.empty()) + return; + + unsigned DiagID = 0; + + switch (Entity.getKind()) { + case InitializedEntity::EK_VectorElement: + case InitializedEntity::EK_ComplexElement: + case InitializedEntity::EK_ArrayElement: + case InitializedEntity::EK_Parameter: + case InitializedEntity::EK_Parameter_CF_Audited: + case InitializedEntity::EK_Result: + // Extra braces here are suspicious. + DiagID = diag::warn_braces_around_scalar_init; + break; + + case InitializedEntity::EK_Member: + // Warn on aggregate initialization but not on ctor init list or + // default member initializer. + if (Entity.getParent()) + DiagID = diag::warn_braces_around_scalar_init; + break; + + case InitializedEntity::EK_Variable: + case InitializedEntity::EK_LambdaCapture: + // No warning, might be direct-list-initialization. + // FIXME: Should we warn for copy-list-initialization in these cases? + break; + + case InitializedEntity::EK_New: + case InitializedEntity::EK_Temporary: + case InitializedEntity::EK_CompoundLiteralInit: + // No warning, braces are part of the syntax of the underlying construct. + break; + + case InitializedEntity::EK_RelatedResult: + // No warning, we already warned when initializing the result. + break; + + case InitializedEntity::EK_Exception: + case InitializedEntity::EK_Base: + case InitializedEntity::EK_Delegating: + case InitializedEntity::EK_BlockElement: + llvm_unreachable("unexpected braced scalar init"); + } + + if (DiagID) { + S.Diag(Braces.getBegin(), DiagID) + << Braces + << FixItHint::CreateRemoval(Braces.getBegin()) + << FixItHint::CreateRemoval(Braces.getEnd()); + } +} + + /// Check whether the initializer \p IList (that was written with explicit /// braces) can be used to initialize an object of type \p T. 
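warnBracedScalarInit above makes the braces-around-scalar warning sensitive to what is being initialized rather than firing only at the top level. Illustrative user code (not part of the patch):

    struct S { int n; };

    S s = { {42} };  // warning: braces around scalar initializer;
                     // fix-its remove the inner '{' and '}'
    int x = {42};    // no warning: EK_Variable may legitimately be
                     // direct-list-initialization
    int f() {
      return {0};    // warning: EK_Result is in the suspicious set
    }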
/// @@ -826,12 +891,9 @@ void InitListChecker::CheckExplicitInitList(const InitializedEntity &Entity, } } - if (!VerifyOnly && T->isScalarType() && IList->getNumInits() == 1 && - !TopLevelObject) - SemaRef.Diag(IList->getLocStart(), diag::warn_braces_around_scalar_init) - << IList->getSourceRange() - << FixItHint::CreateRemoval(IList->getLocStart()) - << FixItHint::CreateRemoval(IList->getLocEnd()); + if (!VerifyOnly && T->isScalarType() && + IList->getNumInits() == 1 && !isa<InitListExpr>(IList->getInit(0))) + warnBracedScalarInit(SemaRef, Entity, IList->getSourceRange()); } void InitListChecker::CheckListElementTypes(const InitializedEntity &Entity, @@ -904,7 +966,7 @@ void InitListChecker::CheckSubElementType(const InitializedEntity &Entity, StructuredList, StructuredIndex); if (InitListExpr *SubInitList = dyn_cast<InitListExpr>(expr)) { - if (!ElemType->isRecordType() || ElemType->isAggregateType()) { + if (!SemaRef.getLangOpts().CPlusPlus) { InitListExpr *InnerStructuredList = getStructuredSubobjectInit(IList, Index, ElemType, StructuredList, StructuredIndex, @@ -915,8 +977,6 @@ void InitListChecker::CheckSubElementType(const InitializedEntity &Entity, ++Index; return; } - assert(SemaRef.getLangOpts().CPlusPlus && - "non-aggregate records are only possible in C++"); // C++ initialization is handled later. } else if (isa<ImplicitValueInitExpr>(expr)) { // This happens during template instantiation when we see an InitListExpr @@ -929,43 +989,24 @@ void InitListChecker::CheckSubElementType(const InitializedEntity &Entity, return; } - // FIXME: Need to handle atomic aggregate types with implicit init lists. - if (ElemType->isScalarType() || ElemType->isAtomicType()) - return CheckScalarType(Entity, IList, ElemType, Index, - StructuredList, StructuredIndex); - - assert((ElemType->isRecordType() || ElemType->isVectorType() || - ElemType->isArrayType()) && "Unexpected type"); - - if (const ArrayType *arrayType = SemaRef.Context.getAsArrayType(ElemType)) { - // arrayType can be incomplete if we're initializing a flexible - // array member. There's nothing we can do with the completed - // type here, though. - - if (IsStringInit(expr, arrayType, SemaRef.Context) == SIF_None) { - if (!VerifyOnly) { - CheckStringInit(expr, ElemType, arrayType, SemaRef); - UpdateStructuredListElement(StructuredList, StructuredIndex, expr); - } - ++Index; - return; - } - - // Fall through for subaggregate initialization. - - } else if (SemaRef.getLangOpts().CPlusPlus) { - // C++ [dcl.init.aggr]p12: - // All implicit type conversions (clause 4) are considered when - // initializing the aggregate member with an initializer from - // an initializer-list. If the initializer can initialize a - // member, the member is initialized. [...] + if (SemaRef.getLangOpts().CPlusPlus) { + // C++ [dcl.init.aggr]p2: + // Each member is copy-initialized from the corresponding + // initializer-clause. // FIXME: Better EqualLoc? InitializationKind Kind = InitializationKind::CreateCopy(expr->getLocStart(), SourceLocation()); - InitializationSequence Seq(SemaRef, Entity, Kind, expr); + InitializationSequence Seq(SemaRef, Entity, Kind, expr, + /*TopLevelOfInitList*/ true); - if (Seq) { + // C++14 [dcl.init.aggr]p13: + // If the assignment-expression can initialize a member, the member is + // initialized. Otherwise [...] brace elision is assumed + // + // Brace elision is never performed if the element is not an + // assignment-expression. 
+ if (Seq || isa<InitListExpr>(expr)) { if (!VerifyOnly) { ExprResult Result = Seq.Perform(SemaRef, Entity, Kind, expr); @@ -974,13 +1015,38 @@ void InitListChecker::CheckSubElementType(const InitializedEntity &Entity, UpdateStructuredListElement(StructuredList, StructuredIndex, Result.getAs<Expr>()); - } + } else if (!Seq) + hadError = true; ++Index; return; } // Fall through for subaggregate initialization + } else if (ElemType->isScalarType() || ElemType->isAtomicType()) { + // FIXME: Need to handle atomic aggregate types with implicit init lists. + return CheckScalarType(Entity, IList, ElemType, Index, + StructuredList, StructuredIndex); + } else if (const ArrayType *arrayType = + SemaRef.Context.getAsArrayType(ElemType)) { + // arrayType can be incomplete if we're initializing a flexible + // array member. There's nothing we can do with the completed + // type here, though. + + if (IsStringInit(expr, arrayType, SemaRef.Context) == SIF_None) { + if (!VerifyOnly) { + CheckStringInit(expr, ElemType, arrayType, SemaRef); + UpdateStructuredListElement(StructuredList, StructuredIndex, expr); + } + ++Index; + return; + } + + // Fall through for subaggregate initialization. + } else { + assert((ElemType->isRecordType() || ElemType->isVectorType()) && + "Unexpected type"); + // C99 6.7.8p13: // // The initializer for a structure or union object that has @@ -990,10 +1056,8 @@ void InitListChecker::CheckSubElementType(const InitializedEntity &Entity, // initial value of the object, including unnamed members, is // that of the expression. ExprResult ExprRes = expr; - if ((ElemType->isRecordType() || ElemType->isVectorType()) && - SemaRef.CheckSingleAssignmentConstraints(ElemType, ExprRes, - !VerifyOnly) - != Sema::Incompatible) { + if (SemaRef.CheckSingleAssignmentConstraints( + ElemType, ExprRes, !VerifyOnly) != Sema::Incompatible) { if (ExprRes.isInvalid()) hadError = true; else { @@ -3037,6 +3101,28 @@ void InitializationSequence::SetOverloadFailure(FailureKind Failure, // Attempt initialization //===----------------------------------------------------------------------===// +/// Tries to add a zero initializer. Returns true if that worked. 
+static bool +maybeRecoverWithZeroInitialization(Sema &S, InitializationSequence &Sequence, + const InitializedEntity &Entity) { + if (Entity.getKind() != InitializedEntity::EK_Variable) + return false; + + VarDecl *VD = cast<VarDecl>(Entity.getDecl()); + if (VD->getInit() || VD->getLocEnd().isMacroID()) + return false; + + QualType VariableTy = VD->getType().getCanonicalType(); + SourceLocation Loc = S.getLocForEndOfToken(VD->getLocEnd()); + std::string Init = S.getFixItZeroInitializerForType(VariableTy, Loc); + if (!Init.empty()) { + Sequence.AddZeroInitializationStep(Entity.getType()); + Sequence.SetZeroInitializationFixit(Init, Loc); + return true; + } + return false; +} + static void MaybeProduceObjCObject(Sema &S, InitializationSequence &Sequence, const InitializedEntity &Entity) { @@ -3107,15 +3193,13 @@ static OverloadingResult ResolveConstructorOverload(Sema &S, SourceLocation DeclLoc, MultiExprArg Args, OverloadCandidateSet &CandidateSet, - ArrayRef<NamedDecl *> Ctors, + DeclContext::lookup_result Ctors, OverloadCandidateSet::iterator &Best, bool CopyInitializing, bool AllowExplicit, - bool OnlyListConstructors, bool InitListSyntax) { + bool OnlyListConstructors, bool IsListInit) { CandidateSet.clear(); - for (ArrayRef<NamedDecl *>::iterator - Con = Ctors.begin(), ConEnd = Ctors.end(); Con != ConEnd; ++Con) { - NamedDecl *D = *Con; + for (NamedDecl *D : Ctors) { DeclAccessPair FoundDecl = DeclAccessPair::make(D, D->getAccess()); bool SuppressUserConversions = false; @@ -3129,19 +3213,21 @@ ResolveConstructorOverload(Sema &S, SourceLocation DeclLoc, Constructor = cast<CXXConstructorDecl>(D); // C++11 [over.best.ics]p4: - // However, when considering the argument of a constructor or - // user-defined conversion function that is a candidate: - // -- by 13.3.1.3 when invoked for the copying/moving of a temporary - // in the second step of a class copy-initialization, - // -- by 13.3.1.7 when passing the initializer list as a single - // argument or when the initializer list has exactly one elementand - // a conversion to some class X or reference to (possibly - // cv-qualified) X is considered for the first parameter of a - // constructor of X, or - // -- by 13.3.1.4, 13.3.1.5, or 13.3.1.6 in all cases, - // only standard conversion sequences and ellipsis conversion sequences - // are considered. - if ((CopyInitializing || (InitListSyntax && Args.size() == 1)) && + // ... and the constructor or user-defined conversion function is a + // candidate by + // - 13.3.1.3, when the argument is the temporary in the second step + // of a class copy-initialization, or + // - 13.3.1.4, 13.3.1.5, or 13.3.1.6 (in all cases), + // user-defined conversion sequences are not considered. + // FIXME: This breaks backward compatibility, e.g. PR12117. As a + // temporary fix, let's re-instate the third bullet above until + // there is a resolution in the standard, i.e., + // - 13.3.1.7 when the initializer list has exactly one element that is + // itself an initializer list and a conversion to some class X or + // reference to (possibly cv-qualified) X is considered for the first + // parameter of a constructor of X. 
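maybeRecoverWithZeroInitialization, defined at the top of this hunk, changes the recovery story for the classic "default initialization of a const object" error: rather than a trailing note, the zero-initializer fix-it is attached to the error itself and the sequence continues with a zero-initialization step, so a usable AST survives. For example:

    void f() {
      const int n;  // error: default initialization of an object of
                    // const type 'const int'; the fix-it inserts " = 0",
                    // and 'n' is then treated as zero-initialized for
                    // further analysis rather than dropped
    }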
+ if ((CopyInitializing || + (IsListInit && Args.size() == 1 && isa<InitListExpr>(Args[0]))) && Constructor->isCopyOrMoveConstructor()) SuppressUserConversions = true; } @@ -3178,16 +3264,19 @@ ResolveConstructorOverload(Sema &S, SourceLocation DeclLoc, /// \brief Attempt initialization by constructor (C++ [dcl.init]), which /// enumerates the constructors of the initialized entity and performs overload /// resolution to select the best. -/// If InitListSyntax is true, this is list-initialization of a non-aggregate -/// class type. +/// \param IsListInit Is this list-initialization? +/// \param IsInitListCopy Is this non-list-initialization resulting from a +/// list-initialization from {x} where x is the same +/// type as the entity? static void TryConstructorInitialization(Sema &S, const InitializedEntity &Entity, const InitializationKind &Kind, MultiExprArg Args, QualType DestType, InitializationSequence &Sequence, - bool InitListSyntax = false) { - assert((!InitListSyntax || (Args.size() == 1 && isa<InitListExpr>(Args[0]))) && - "InitListSyntax must come with a single initializer list argument."); + bool IsListInit = false, + bool IsInitListCopy = false) { + assert((!IsListInit || (Args.size() == 1 && isa<InitListExpr>(Args[0]))) && + "IsListInit must come with a single initializer list argument."); // The type we're constructing needs to be complete. if (S.RequireCompleteType(Kind.getLocation(), DestType, 0)) { @@ -3206,29 +3295,28 @@ static void TryConstructorInitialization(Sema &S, // Determine whether we are allowed to call explicit constructors or // explicit conversion operators. - bool AllowExplicit = Kind.AllowExplicit() || InitListSyntax; + bool AllowExplicit = Kind.AllowExplicit() || IsListInit; bool CopyInitialization = Kind.getKind() == InitializationKind::IK_Copy; // - Otherwise, if T is a class type, constructors are considered. The // applicable constructors are enumerated, and the best one is chosen // through overload resolution. - DeclContext::lookup_result R = S.LookupConstructors(DestRecordDecl); - // The container holding the constructors can under certain conditions - // be changed while iterating (e.g. because of deserialization). - // To be safe we copy the lookup results to a new container. - SmallVector<NamedDecl*, 16> Ctors(R.begin(), R.end()); + DeclContext::lookup_result Ctors = S.LookupConstructors(DestRecordDecl); OverloadingResult Result = OR_No_Viable_Function; OverloadCandidateSet::iterator Best; bool AsInitializerList = false; - // C++11 [over.match.list]p1: - // When objects of non-aggregate type T are list-initialized, overload - // resolution selects the constructor in two phases: + // C++11 [over.match.list]p1, per DR1467: + // When objects of non-aggregate type T are list-initialized, such that + // 8.5.4 [dcl.init.list] specifies that overload resolution is performed + // according to the rules in this section, overload resolution selects + // the constructor in two phases: + // // - Initially, the candidate functions are the initializer-list // constructors of the class T and the argument list consists of the // initializer list as a single argument. - if (InitListSyntax) { + if (IsListInit) { InitListExpr *ILE = cast<InitListExpr>(Args[0]); AsInitializerList = true; @@ -3239,7 +3327,7 @@ static void TryConstructorInitialization(Sema &S, CandidateSet, Ctors, Best, CopyInitialization, AllowExplicit, /*OnlyListConstructor=*/true, - InitListSyntax); + IsListInit); // Time to unwrap the init list. 
Args = MultiExprArg(ILE->getInits(), ILE->getNumInits()); @@ -3256,10 +3344,10 @@ static void TryConstructorInitialization(Sema &S, CandidateSet, Ctors, Best, CopyInitialization, AllowExplicit, /*OnlyListConstructors=*/false, - InitListSyntax); + IsListInit); } if (Result) { - Sequence.SetOverloadFailure(InitListSyntax ? + Sequence.SetOverloadFailure(IsListInit ? InitializationSequence::FK_ListConstructorOverloadFailed : InitializationSequence::FK_ConstructorOverloadFailed, Result); @@ -3273,7 +3361,8 @@ static void TryConstructorInitialization(Sema &S, if (Kind.getKind() == InitializationKind::IK_Default && Entity.getType().isConstQualified() && !cast<CXXConstructorDecl>(Best->Function)->isUserProvided()) { - Sequence.SetFailed(InitializationSequence::FK_DefaultInitOfConst); + if (!maybeRecoverWithZeroInitialization(S, Sequence, Entity)) + Sequence.SetFailed(InitializationSequence::FK_DefaultInitOfConst); return; } @@ -3281,7 +3370,7 @@ static void TryConstructorInitialization(Sema &S, // In copy-list-initialization, if an explicit constructor is chosen, the // initializer is ill-formed. CXXConstructorDecl *CtorDecl = cast<CXXConstructorDecl>(Best->Function); - if (InitListSyntax && !Kind.AllowExplicit() && CtorDecl->isExplicit()) { + if (IsListInit && !Kind.AllowExplicit() && CtorDecl->isExplicit()) { Sequence.SetFailed(InitializationSequence::FK_ExplicitConstructor); return; } @@ -3289,10 +3378,9 @@ static void TryConstructorInitialization(Sema &S, // Add the constructor initialization step. Any cv-qualification conversion is // subsumed by the initialization. bool HadMultipleCandidates = (CandidateSet.size() > 1); - Sequence.AddConstructorInitializationStep(CtorDecl, - Best->FoundDecl.getAccess(), - DestType, HadMultipleCandidates, - InitListSyntax, AsInitializerList); + Sequence.AddConstructorInitializationStep( + CtorDecl, Best->FoundDecl.getAccess(), DestType, HadMultipleCandidates, + IsListInit | IsInitListCopy, AsInitializerList); } static bool @@ -3350,6 +3438,11 @@ static void TryReferenceListInitialization(Sema &S, Sequence.SetFailed(InitializationSequence::FK_ReferenceBindingToInitList); return; } + // Can't reference initialize a compound literal. + if (Entity.getKind() == InitializedEntity::EK_CompoundLiteralInit) { + Sequence.SetFailed(InitializationSequence::FK_ReferenceBindingToInitList); + return; + } QualType DestType = Entity.getType(); QualType cv1T1 = DestType->getAs<ReferenceType>()->getPointeeType(); @@ -3425,48 +3518,97 @@ static void TryListInitialization(Sema &S, TryReferenceListInitialization(S, Entity, Kind, InitList, Sequence); return; } - if (DestType->isRecordType()) { - if (S.RequireCompleteType(InitList->getLocStart(), DestType, 0)) { - Sequence.setIncompleteTypeFailure(DestType); - return; - } - // C++11 [dcl.init.list]p3: - // - If T is an aggregate, aggregate initialization is performed. - if (!DestType->isAggregateType()) { - if (S.getLangOpts().CPlusPlus11) { - // - Otherwise, if the initializer list has no elements and T is a - // class type with a default constructor, the object is - // value-initialized. 
- if (InitList->getNumInits() == 0) { - CXXRecordDecl *RD = DestType->getAsCXXRecordDecl(); - if (RD->hasDefaultConstructor()) { - TryValueInitialization(S, Entity, Kind, Sequence, InitList); - return; - } + if (DestType->isRecordType() && + S.RequireCompleteType(InitList->getLocStart(), DestType, 0)) { + Sequence.setIncompleteTypeFailure(DestType); + return; + } + + // C++11 [dcl.init.list]p3, per DR1467: + // - If T is a class type and the initializer list has a single element of + // type cv U, where U is T or a class derived from T, the object is + // initialized from that element (by copy-initialization for + // copy-list-initialization, or by direct-initialization for + // direct-list-initialization). + // - Otherwise, if T is a character array and the initializer list has a + // single element that is an appropriately-typed string literal + // (8.5.2 [dcl.init.string]), initialization is performed as described + // in that section. + // - Otherwise, if T is an aggregate, [...] (continue below). + if (S.getLangOpts().CPlusPlus11 && InitList->getNumInits() == 1) { + if (DestType->isRecordType()) { + QualType InitType = InitList->getInit(0)->getType(); + if (S.Context.hasSameUnqualifiedType(InitType, DestType) || + S.IsDerivedFrom(InitType, DestType)) { + Expr *InitAsExpr = InitList->getInit(0); + TryConstructorInitialization(S, Entity, Kind, InitAsExpr, DestType, + Sequence, /*InitListSyntax*/ false, + /*IsInitListCopy*/ true); + return; + } + } + if (const ArrayType *DestAT = S.Context.getAsArrayType(DestType)) { + Expr *SubInit[1] = {InitList->getInit(0)}; + if (!isa<VariableArrayType>(DestAT) && + IsStringInit(SubInit[0], DestAT, S.Context) == SIF_None) { + InitializationKind SubKind = + Kind.getKind() == InitializationKind::IK_DirectList + ? InitializationKind::CreateDirect(Kind.getLocation(), + InitList->getLBraceLoc(), + InitList->getRBraceLoc()) + : Kind; + Sequence.InitializeFrom(S, Entity, SubKind, SubInit, + /*TopLevelOfInitList*/ true); + + // TryStringLiteralInitialization() (in InitializeFrom()) will fail if + // the element is not an appropriately-typed string literal, in which + // case we should proceed as in C++11 (below). + if (Sequence) { + Sequence.RewrapReferenceInitList(Entity.getType(), InitList); + return; } + } + } + } - // - Otherwise, if T is a specialization of std::initializer_list<E>, - // an initializer_list object constructed [...] - if (TryInitializerListConstruction(S, InitList, DestType, Sequence)) + // C++11 [dcl.init.list]p3: + // - If T is an aggregate, aggregate initialization is performed. + if (DestType->isRecordType() && !DestType->isAggregateType()) { + if (S.getLangOpts().CPlusPlus11) { + // - Otherwise, if the initializer list has no elements and T is a + // class type with a default constructor, the object is + // value-initialized. + if (InitList->getNumInits() == 0) { + CXXRecordDecl *RD = DestType->getAsCXXRecordDecl(); + if (RD->hasDefaultConstructor()) { + TryValueInitialization(S, Entity, Kind, Sequence, InitList); return; + } + } - // - Otherwise, if T is a class type, constructors are considered. - Expr *InitListAsExpr = InitList; - TryConstructorInitialization(S, Entity, Kind, InitListAsExpr, DestType, - Sequence, /*InitListSyntax*/true); - } else - Sequence.SetFailed( - InitializationSequence::FK_InitListBadDestinationType); - return; - } + // - Otherwise, if T is a specialization of std::initializer_list<E>, + // an initializer_list object constructed [...] 
+ if (TryInitializerListConstruction(S, InitList, DestType, Sequence)) + return; + + // - Otherwise, if T is a class type, constructors are considered. + Expr *InitListAsExpr = InitList; + TryConstructorInitialization(S, Entity, Kind, InitListAsExpr, DestType, + Sequence, /*InitListSyntax*/ true); + } else + Sequence.SetFailed(InitializationSequence::FK_InitListBadDestinationType); + return; } + if (S.getLangOpts().CPlusPlus && !DestType->isAggregateType() && InitList->getNumInits() == 1 && InitList->getInit(0)->getType()->isRecordType()) { // - Otherwise, if the initializer list has a single element of type E // [...references are handled above...], the object or reference is - // initialized from that element; if a narrowing conversion is required + // initialized from that element (by copy-initialization for + // copy-list-initialization, or by direct-initialization for + // direct-list-initialization); if a narrowing conversion is required // to convert the element to T, the program is ill-formed. // // Per core-24034, this is direct-initialization if we were performing @@ -3543,14 +3685,7 @@ static OverloadingResult TryRefInitWithConversionFunction(Sema &S, // to see if there is a suitable conversion. CXXRecordDecl *T1RecordDecl = cast<CXXRecordDecl>(T1RecordType->getDecl()); - DeclContext::lookup_result R = S.LookupConstructors(T1RecordDecl); - // The container holding the constructors can under certain conditions - // be changed while iterating (e.g. because of deserialization). - // To be safe we copy the lookup results to a new container. - SmallVector<NamedDecl*, 16> Ctors(R.begin(), R.end()); - for (SmallVectorImpl<NamedDecl *>::iterator - CI = Ctors.begin(), CE = Ctors.end(); CI != CE; ++CI) { - NamedDecl *D = *CI; + for (NamedDecl *D : S.LookupConstructors(T1RecordDecl)) { DeclAccessPair FoundDecl = DeclAccessPair::make(D, D->getAccess()); // Find the constructor (which may be a template). @@ -3586,11 +3721,8 @@ static OverloadingResult TryRefInitWithConversionFunction(Sema &S, // functions. CXXRecordDecl *T2RecordDecl = cast<CXXRecordDecl>(T2RecordType->getDecl()); - std::pair<CXXRecordDecl::conversion_iterator, - CXXRecordDecl::conversion_iterator> - Conversions = T2RecordDecl->getVisibleConversionFunctions(); - for (CXXRecordDecl::conversion_iterator - I = Conversions.first, E = Conversions.second; I != E; ++I) { + const auto &Conversions = T2RecordDecl->getVisibleConversionFunctions(); + for (auto I = Conversions.begin(), E = Conversions.end(); I != E; ++I) { NamedDecl *D = *I; CXXRecordDecl *ActingDC = cast<CXXRecordDecl>(D->getDeclContext()); if (isa<UsingShadowDecl>(D)) @@ -4127,7 +4259,8 @@ static void TryDefaultInitialization(Sema &S, // a const-qualified type T, T shall be a class type with a user-provided // default constructor. 
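The restructured TryListInitialization above implements DR1467: a braced list with a single element that already has the destination class type (or a type derived from it) now initializes the object directly from that element, and a single string literal initializing a character array is routed to string-literal initialization instead of being treated element-wise. Illustrative user code:

    struct X {
      X();
      X(const X &);
    };

    X a;
    X b{a};              // DR1467: (direct-)initialized from 'a' itself,
                         // not via another round of list-initialization
    char buf[4]{"abc"};  // handled per [dcl.init.string]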
if (DestType.isConstQualified() && S.getLangOpts().CPlusPlus) { - Sequence.SetFailed(InitializationSequence::FK_DefaultInitOfConst); + if (!maybeRecoverWithZeroInitialization(S, Sequence, Entity)) + Sequence.SetFailed(InitializationSequence::FK_DefaultInitOfConst); return; } @@ -4218,11 +4351,9 @@ static void TryUserDefinedConversion(Sema &S, CXXRecordDecl *SourceRecordDecl = cast<CXXRecordDecl>(SourceRecordType->getDecl()); - std::pair<CXXRecordDecl::conversion_iterator, - CXXRecordDecl::conversion_iterator> - Conversions = SourceRecordDecl->getVisibleConversionFunctions(); - for (CXXRecordDecl::conversion_iterator - I = Conversions.first, E = Conversions.second; I != E; ++I) { + const auto &Conversions = + SourceRecordDecl->getVisibleConversionFunctions(); + for (auto I = Conversions.begin(), E = Conversions.end(); I != E; ++I) { NamedDecl *D = *I; CXXRecordDecl *ActingDC = cast<CXXRecordDecl>(D->getDeclContext()); if (isa<UsingShadowDecl>(D)) @@ -4408,8 +4539,7 @@ static void checkIndirectCopyRestoreSource(Sema &S, Expr *src) { /// \brief Determine whether we have compatible array types for the /// purposes of GNU by-copy array initialization. -static bool hasCompatibleArrayTypes(ASTContext &Context, - const ArrayType *Dest, +static bool hasCompatibleArrayTypes(ASTContext &Context, const ArrayType *Dest, const ArrayType *Source) { // If the source and destination array types are equivalent, we're // done. @@ -4668,7 +4798,7 @@ void InitializationSequence::InitializeFrom(Sema &S, return; } - // Determine whether we should consider writeback conversions for + // Determine whether we should consider writeback conversions for // Objective-C ARC. bool allowObjCWritebackConversion = S.getLangOpts().ObjCAutoRefCount && Entity.isParameterKind(); @@ -5022,6 +5152,8 @@ static ExprResult CopyObject(Sema &S, const InitializedEntity &Entity, ExprResult CurInit, bool IsExtraneousCopy) { + if (CurInit.isInvalid()) + return CurInit; // Determine which class type we're copying to. Expr *CurInitExpr = (Expr *)CurInit.get(); CXXRecordDecl *Class = nullptr; @@ -5636,6 +5768,115 @@ static void DiagnoseNarrowingInInitList(Sema &S, QualType EntityType, const Expr *PostInit); +/// Provide warnings when std::move is used on construction. +static void CheckMoveOnConstruction(Sema &S, const Expr *InitExpr, + bool IsReturnStmt) { + if (!InitExpr) + return; + + QualType DestType = InitExpr->getType(); + if (!DestType->isRecordType()) + return; + + unsigned DiagID = 0; + if (IsReturnStmt) { + const CXXConstructExpr *CCE = + dyn_cast<CXXConstructExpr>(InitExpr->IgnoreParens()); + if (!CCE || CCE->getNumArgs() != 1) + return; + + if (!CCE->getConstructor()->isCopyOrMoveConstructor()) + return; + + InitExpr = CCE->getArg(0)->IgnoreImpCasts(); + + // Remove implicit temporary and constructor nodes. + if (const MaterializeTemporaryExpr *MTE = + dyn_cast<MaterializeTemporaryExpr>(InitExpr)) { + InitExpr = MTE->GetTemporaryExpr()->IgnoreImpCasts(); + while (const CXXConstructExpr *CCE = + dyn_cast<CXXConstructExpr>(InitExpr)) { + if (isa<CXXTemporaryObjectExpr>(CCE)) + return; + if (CCE->getNumArgs() == 0) + return; + if (CCE->getNumArgs() > 1 && !isa<CXXDefaultArgExpr>(CCE->getArg(1))) + return; + InitExpr = CCE->getArg(0); + } + InitExpr = InitExpr->IgnoreImpCasts(); + DiagID = diag::warn_redundant_move_on_return; + } + } + + // Find the std::move call and get the argument. 
+ const CallExpr *CE = dyn_cast<CallExpr>(InitExpr->IgnoreParens()); + if (!CE || CE->getNumArgs() != 1) + return; + + const FunctionDecl *MoveFunction = CE->getDirectCallee(); + if (!MoveFunction || !MoveFunction->isInStdNamespace() || + !MoveFunction->getIdentifier() || + !MoveFunction->getIdentifier()->isStr("move")) + return; + + const Expr *Arg = CE->getArg(0)->IgnoreImplicit(); + + if (IsReturnStmt) { + const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Arg->IgnoreParenImpCasts()); + if (!DRE || DRE->refersToEnclosingVariableOrCapture()) + return; + + const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl()); + if (!VD || !VD->hasLocalStorage()) + return; + + if (!VD->getType()->isRecordType()) + return; + + if (DiagID == 0) { + DiagID = S.Context.hasSameUnqualifiedType(DestType, VD->getType()) + ? diag::warn_pessimizing_move_on_return + : diag::warn_redundant_move_on_return; + } + } else { + DiagID = diag::warn_pessimizing_move_on_initialization; + const Expr *ArgStripped = Arg->IgnoreImplicit()->IgnoreParens(); + if (!ArgStripped->isRValue() || !ArgStripped->getType()->isRecordType()) + return; + } + + S.Diag(CE->getLocStart(), DiagID); + + // Get all the locations for a fix-it. Don't emit the fix-it if any location + // is within a macro. + SourceLocation CallBegin = CE->getCallee()->getLocStart(); + if (CallBegin.isMacroID()) + return; + SourceLocation RParen = CE->getRParenLoc(); + if (RParen.isMacroID()) + return; + SourceLocation LParen; + SourceLocation ArgLoc = Arg->getLocStart(); + + // Special testing for the argument location. Since the fix-it needs the + // location right before the argument, the argument location can be in a + // macro only if it is at the beginning of the macro. + while (ArgLoc.isMacroID() && + S.getSourceManager().isAtStartOfImmediateMacroExpansion(ArgLoc)) { + ArgLoc = S.getSourceManager().getImmediateExpansionRange(ArgLoc).first; + } + + if (LParen.isMacroID()) + return; + + LParen = ArgLoc.getLocWithOffset(-1); + + S.Diag(CE->getLocStart(), diag::note_remove_move) + << FixItHint::CreateRemoval(SourceRange(CallBegin, LParen)) + << FixItHint::CreateRemoval(SourceRange(RParen, RParen)); +} + ExprResult InitializationSequence::Perform(Sema &S, const InitializedEntity &Entity, @@ -5646,6 +5887,21 @@ InitializationSequence::Perform(Sema &S, Diagnose(S, Entity, Kind, Args); return ExprError(); } + if (!ZeroInitializationFixit.empty()) { + unsigned DiagID = diag::err_default_init_const; + if (Decl *D = Entity.getDecl()) + if (S.getLangOpts().MSVCCompat && D->hasAttr<SelectAnyAttr>()) + DiagID = diag::ext_default_init_const; + + // The initialization would have succeeded with this fixit. Since the fixit + // is on the error, we need to build a valid AST in this case, so this isn't + // handled in the Failed() branch above. 
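CheckMoveOnConstruction, completed just above, powers the new pessimizing/redundant std::move warnings and their fix-it, which simply deletes the call (and is suppressed when macros get in the way). Typical triggers:

    #include <utility>

    struct T {};

    T f() {
      T t;
      return std::move(t);   // warning: moving a local object in a return
    }                        // statement prevents copy elision; the fix-it
                             // removes "std::move(" and the closing ")"

    void g() {
      T t = std::move(T());  // warning: moving a temporary object
    }                        // prevents copy elision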
+ QualType DestType = Entity.getType(); + S.Diag(Kind.getLocation(), DiagID) + << DestType << (bool)DestType->getAs<RecordType>() + << FixItHint::CreateInsertion(ZeroInitializationFixitLoc, + ZeroInitializationFixit); + } if (getKind() == DependentSequence) { // If the declaration is a non-dependent, incomplete array type @@ -5820,15 +6076,6 @@ InitializationSequence::Perform(Sema &S, &BasePath, IgnoreBaseAccess)) return ExprError(); - if (S.BasePathInvolvesVirtualBase(BasePath)) { - QualType T = SourceType; - if (const PointerType *Pointer = T->getAs<PointerType>()) - T = Pointer->getPointeeType(); - if (const RecordType *RecordTy = T->getAs<RecordType>()) - S.MarkVTableUsed(CurInit.get()->getLocStart(), - cast<CXXRecordDecl>(RecordTy->getDecl())); - } - ExprValueKind VK = Step->Kind == SK_CastDerivedToBaseLValue ? VK_LValue : @@ -6359,6 +6606,12 @@ InitializationSequence::Perform(Sema &S, cast<FieldDecl>(Entity.getDecl()), CurInit.get()); + // Check for std::move on construction. + if (const Expr *E = CurInit.get()) { + CheckMoveOnConstruction(S, E, + Entity.getKind() == InitializedEntity::EK_Result); + } + return CurInit; } @@ -6455,26 +6708,6 @@ static void diagnoseListInit(Sema &S, const InitializedEntity &Entity, "Inconsistent init list check result."); } -/// Prints a fixit for adding a null initializer for |Entity|. Call this only -/// right after emitting a diagnostic. -static void maybeEmitZeroInitializationFixit(Sema &S, - InitializationSequence &Sequence, - const InitializedEntity &Entity) { - if (Entity.getKind() != InitializedEntity::EK_Variable) - return; - - VarDecl *VD = cast<VarDecl>(Entity.getDecl()); - if (VD->getInit() || VD->getLocEnd().isMacroID()) - return; - - QualType VariableTy = VD->getType().getCanonicalType(); - SourceLocation Loc = S.getLocForEndOfToken(VD->getLocEnd()); - std::string Init = S.getFixItZeroInitializerForType(VariableTy, Loc); - - S.Diag(Loc, diag::note_add_initializer) - << VD << FixItHint::CreateInsertion(Loc, Init); -} - bool InitializationSequence::Diagnose(Sema &S, const InitializedEntity &Entity, const InitializationKind &Kind, @@ -6622,12 +6855,19 @@ bool InitializationSequence::Diagnose(Sema &S, << Args[0]->getSourceRange(); break; - case FK_ReferenceInitDropsQualifiers: + case FK_ReferenceInitDropsQualifiers: { + QualType SourceType = Args[0]->getType(); + QualType NonRefType = DestType.getNonReferenceType(); + Qualifiers DroppedQualifiers = + SourceType.getQualifiers() - NonRefType.getQualifiers(); + S.Diag(Kind.getLocation(), diag::err_reference_bind_drops_quals) - << DestType.getNonReferenceType() - << Args[0]->getType() + << SourceType + << NonRefType + << DroppedQualifiers.getCVRQualifiers() << Args[0]->getSourceRange(); break; + } case FK_ReferenceInitFailed: S.Diag(Kind.getLocation(), diag::err_reference_bind_failed) @@ -6659,9 +6899,9 @@ bool InitializationSequence::Diagnose(Sema &S, case FK_TooManyInitsForScalar: { SourceRange R; - if (InitListExpr *InitList = dyn_cast<InitListExpr>(Args[0])) - R = SourceRange(InitList->getInit(0)->getLocEnd(), - InitList->getLocEnd()); + auto *InitList = dyn_cast<InitListExpr>(Args[0]); + if (InitList && InitList->getNumInits() == 1) + R = SourceRange(InitList->getInit(0)->getLocEnd(), InitList->getLocEnd()); else R = SourceRange(Args.front()->getLocEnd(), Args.back()->getLocEnd()); @@ -6806,7 +7046,6 @@ bool InitializationSequence::Diagnose(Sema &S, } else { S.Diag(Kind.getLocation(), diag::err_default_init_const) << DestType << (bool)DestType->getAs<RecordType>(); - 
maybeEmitZeroInitializationFixit(S, *this, Entity); } break; @@ -7262,7 +7501,6 @@ Sema::PerformCopyInitialization(const InitializedEntity &Entity, EqualLoc, AllowExplicit); InitializationSequence Seq(*this, Entity, Kind, InitE, TopLevelOfInitList); - Init.get(); ExprResult Result = Seq.Perform(*this, Entity, Kind, InitE); diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaLambda.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaLambda.cpp index 90a81f4..8220641 100644 --- a/contrib/llvm/tools/clang/lib/Sema/SemaLambda.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/SemaLambda.cpp @@ -818,7 +818,6 @@ VarDecl *Sema::createLambdaInitCaptureVarDecl(SourceLocation Loc, NewVD->markUsed(Context); NewVD->setInit(Init); return NewVD; - } FieldDecl *Sema::buildInitCaptureField(LambdaScopeInfo *LSI, VarDecl *Var) { @@ -837,7 +836,8 @@ FieldDecl *Sema::buildInitCaptureField(LambdaScopeInfo *LSI, VarDecl *Var) { } void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, - Declarator &ParamInfo, Scope *CurScope) { + Declarator &ParamInfo, + Scope *CurScope) { // Determine if we're within a context where we know that the lambda will // be dependent, because there are template parameters in scope. bool KnownDependent = false; @@ -930,12 +930,8 @@ void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, PushDeclContext(CurScope, Method); // Build the lambda scope. - buildLambdaScope(LSI, Method, - Intro.Range, - Intro.Default, Intro.DefaultLoc, - ExplicitParams, - ExplicitResultType, - !Method->isConst()); + buildLambdaScope(LSI, Method, Intro.Range, Intro.Default, Intro.DefaultLoc, + ExplicitParams, ExplicitResultType, !Method->isConst()); // C++11 [expr.prim.lambda]p9: // A lambda-expression whose smallest enclosing scope is a block scope is a @@ -1137,7 +1133,7 @@ void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, void Sema::ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation) { - LambdaScopeInfo *LSI = getCurLambda(); + LambdaScopeInfo *LSI = cast<LambdaScopeInfo>(FunctionScopes.back()); // Leave the expression-evaluation context. DiscardCleanupsInEvaluationContext(); @@ -1340,22 +1336,27 @@ static void addBlockPointerConversion(Sema &S, SourceRange IntroducerRange, CXXRecordDecl *Class, CXXMethodDecl *CallOperator) { - const FunctionProtoType *Proto - = CallOperator->getType()->getAs<FunctionProtoType>(); - QualType BlockPtrTy; - { - FunctionProtoType::ExtProtoInfo ExtInfo = Proto->getExtProtoInfo(); - ExtInfo.TypeQuals = 0; - QualType FunctionTy = S.Context.getFunctionType( - Proto->getReturnType(), Proto->getParamTypes(), ExtInfo); - BlockPtrTy = S.Context.getBlockPointerType(FunctionTy); - } + const FunctionProtoType *Proto = + CallOperator->getType()->getAs<FunctionProtoType>(); + + // The function type inside the block pointer type is the same as the call + // operator with some tweaks. The calling convention is the default free + // function convention, and the type qualifications are lost. 
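The rewritten addBlockPointerConversion computes the block's function type explicitly: the call operator's prototype with its qualifiers dropped and the default free-function calling convention substituted. Sema only adds this conversion in Objective-C++ with blocks enabled, so the user-visible effect (sketched below, assuming a .mm file compiled with -fblocks) is:

    void demo() {
      auto lambda = [](int x) { return x + 1; };
      // Uses the generated 'operator int (^)(int)() const' conversion.
      int (^blk)(int) = lambda;
      // blk(2) == 3
    }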
+ FunctionProtoType::ExtProtoInfo BlockEPI = Proto->getExtProtoInfo(); + BlockEPI.ExtInfo = + BlockEPI.ExtInfo.withCallingConv(S.Context.getDefaultCallingConvention( + Proto->isVariadic(), /*IsCXXMethod=*/false)); + BlockEPI.TypeQuals = 0; + QualType FunctionTy = S.Context.getFunctionType( + Proto->getReturnType(), Proto->getParamTypes(), BlockEPI); + QualType BlockPtrTy = S.Context.getBlockPointerType(FunctionTy); + + FunctionProtoType::ExtProtoInfo ConversionEPI( + S.Context.getDefaultCallingConvention( + /*IsVariadic=*/false, /*IsCXXMethod=*/true)); + ConversionEPI.TypeQuals = Qualifiers::Const; + QualType ConvTy = S.Context.getFunctionType(BlockPtrTy, None, ConversionEPI); - FunctionProtoType::ExtProtoInfo ExtInfo(S.Context.getDefaultCallingConvention( - /*IsVariadic=*/false, /*IsCXXMethod=*/true)); - ExtInfo.TypeQuals = Qualifiers::Const; - QualType ConvTy = S.Context.getFunctionType(BlockPtrTy, None, ExtInfo); - SourceLocation Loc = IntroducerRange.getBegin(); DeclarationName Name = S.Context.DeclarationNames.getCXXConversionFunctionName( @@ -1374,15 +1375,131 @@ static void addBlockPointerConversion(Sema &S, Conversion->setImplicit(true); Class->addDecl(Conversion); } + +static ExprResult performLambdaVarCaptureInitialization( + Sema &S, LambdaScopeInfo::Capture &Capture, + FieldDecl *Field, + SmallVectorImpl<VarDecl *> &ArrayIndexVars, + SmallVectorImpl<unsigned> &ArrayIndexStarts) { + assert(Capture.isVariableCapture() && "not a variable capture"); + + auto *Var = Capture.getVariable(); + SourceLocation Loc = Capture.getLocation(); + + // C++11 [expr.prim.lambda]p21: + // When the lambda-expression is evaluated, the entities that + // are captured by copy are used to direct-initialize each + // corresponding non-static data member of the resulting closure + // object. (For array members, the array elements are + // direct-initialized in increasing subscript order.) These + // initializations are performed in the (unspecified) order in + // which the non-static data members are declared. + + // C++ [expr.prim.lambda]p12: + // An entity captured by a lambda-expression is odr-used (3.2) in + // the scope containing the lambda-expression. + ExprResult RefResult = S.BuildDeclarationNameExpr( + CXXScopeSpec(), DeclarationNameInfo(Var->getDeclName(), Loc), Var); + if (RefResult.isInvalid()) + return ExprError(); + Expr *Ref = RefResult.get(); + + QualType FieldType = Field->getType(); + + // When the variable has array type, create index variables for each + // dimension of the array. We use these index variables to subscript + // the source array, and other clients (e.g., CodeGen) will perform + // the necessary iteration with these index variables. + // + // FIXME: This is dumb. Add a proper AST representation for array + // copy-construction and use it here. + SmallVector<VarDecl *, 4> IndexVariables; + QualType BaseType = FieldType; + QualType SizeType = S.Context.getSizeType(); + ArrayIndexStarts.push_back(ArrayIndexVars.size()); + while (const ConstantArrayType *Array + = S.Context.getAsConstantArrayType(BaseType)) { + // Create the iteration variable for this array index. 
+ IdentifierInfo *IterationVarName = nullptr; + { + SmallString<8> Str; + llvm::raw_svector_ostream OS(Str); + OS << "__i" << IndexVariables.size(); + IterationVarName = &S.Context.Idents.get(OS.str()); + } + VarDecl *IterationVar = VarDecl::Create( + S.Context, S.CurContext, Loc, Loc, IterationVarName, SizeType, + S.Context.getTrivialTypeSourceInfo(SizeType, Loc), SC_None); + IterationVar->setImplicit(); + IndexVariables.push_back(IterationVar); + ArrayIndexVars.push_back(IterationVar); + + // Create a reference to the iteration variable. + ExprResult IterationVarRef = + S.BuildDeclRefExpr(IterationVar, SizeType, VK_LValue, Loc); + assert(!IterationVarRef.isInvalid() && + "Reference to invented variable cannot fail!"); + IterationVarRef = S.DefaultLvalueConversion(IterationVarRef.get()); + assert(!IterationVarRef.isInvalid() && + "Conversion of invented variable cannot fail!"); + + // Subscript the array with this iteration variable. + ExprResult Subscript = + S.CreateBuiltinArraySubscriptExpr(Ref, Loc, IterationVarRef.get(), Loc); + if (Subscript.isInvalid()) + return ExprError(); + + Ref = Subscript.get(); + BaseType = Array->getElementType(); + } + + // Construct the entity that we will be initializing. For an array, this + // will be the first element in the array, which may require several levels + // of array-subscript entities. + SmallVector<InitializedEntity, 4> Entities; + Entities.reserve(1 + IndexVariables.size()); + Entities.push_back(InitializedEntity::InitializeLambdaCapture( + Var->getIdentifier(), FieldType, Loc)); + for (unsigned I = 0, N = IndexVariables.size(); I != N; ++I) + Entities.push_back( + InitializedEntity::InitializeElement(S.Context, 0, Entities.back())); + + InitializationKind InitKind = InitializationKind::CreateDirect(Loc, Loc, Loc); + InitializationSequence Init(S, Entities.back(), InitKind, Ref); + return Init.Perform(S, Entities.back(), InitKind, Ref); +} ExprResult Sema::ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, - Scope *CurScope, - bool IsInstantiation) { + Scope *CurScope) { + LambdaScopeInfo LSI = *cast<LambdaScopeInfo>(FunctionScopes.back()); + ActOnFinishFunctionBody(LSI.CallOperator, Body); + return BuildLambdaExpr(StartLoc, Body->getLocEnd(), &LSI); +} + +static LambdaCaptureDefault +mapImplicitCaptureStyle(CapturingScopeInfo::ImplicitCaptureStyle ICS) { + switch (ICS) { + case CapturingScopeInfo::ImpCap_None: + return LCD_None; + case CapturingScopeInfo::ImpCap_LambdaByval: + return LCD_ByCopy; + case CapturingScopeInfo::ImpCap_CapturedRegion: + case CapturingScopeInfo::ImpCap_LambdaByref: + return LCD_ByRef; + case CapturingScopeInfo::ImpCap_Block: + llvm_unreachable("block capture in lambda"); + } + llvm_unreachable("Unknown implicit capture style"); +} + +ExprResult Sema::BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc, + LambdaScopeInfo *LSI) { // Collect information from the lambda scope.
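The index-variable machinery above implements the [expr.prim.lambda]p21 wording quoted at the top of the function. In user terms (identifiers below are arbitrary), a by-copy capture of an array copies it element by element into the closure's array member, in increasing subscript order; the invented __i0-style variables are what CodeGen iterates with:

    // array_capture.cpp (illustrative) -- clang++ -std=c++11 array_capture.cpp
    int main() {
      int Arr[3] = {1, 2, 3};
      // Conceptually: closure.Arr[__i0] is direct-initialized from Arr[__i0]
      // for __i0 = 0, 1, 2.
      auto Sum = [Arr] { return Arr[0] + Arr[1] + Arr[2]; };
      Arr[0] = 100;                // mutates the original, not the captured copy
      return Sum() == 6 ? 0 : 1;   // exits 0
    }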
SmallVector<LambdaCapture, 4> Captures; SmallVector<Expr *, 4> CaptureInits; - LambdaCaptureDefault CaptureDefault; - SourceLocation CaptureDefaultLoc; + SourceLocation CaptureDefaultLoc = LSI->CaptureDefaultLoc; + LambdaCaptureDefault CaptureDefault = + mapImplicitCaptureStyle(LSI->ImpCaptureStyle); CXXRecordDecl *Class; CXXMethodDecl *CallOperator; SourceRange IntroducerRange; @@ -1393,7 +1510,6 @@ ExprResult Sema::ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, SmallVector<VarDecl *, 4> ArrayIndexVars; SmallVector<unsigned, 4> ArrayIndexStarts; { - LambdaScopeInfo *LSI = getCurLambda(); CallOperator = LSI->CallOperator; Class = LSI->Lambda; IntroducerRange = LSI->IntroducerRange; @@ -1401,11 +1517,21 @@ ExprResult Sema::ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, ExplicitResultType = !LSI->HasImplicitReturnType; LambdaExprNeedsCleanups = LSI->ExprNeedsCleanups; ContainsUnexpandedParameterPack = LSI->ContainsUnexpandedParameterPack; - ArrayIndexVars.swap(LSI->ArrayIndexVars); - ArrayIndexStarts.swap(LSI->ArrayIndexStarts); + CallOperator->setLexicalDeclContext(Class); + Decl *TemplateOrNonTemplateCallOperatorDecl = + CallOperator->getDescribedFunctionTemplate() + ? CallOperator->getDescribedFunctionTemplate() + : cast<Decl>(CallOperator); + + TemplateOrNonTemplateCallOperatorDecl->setLexicalDeclContext(Class); + Class->addDecl(TemplateOrNonTemplateCallOperatorDecl); + + PopExpressionEvaluationContext(); + // Translate captures. - for (unsigned I = 0, N = LSI->Captures.size(); I != N; ++I) { + auto CurField = Class->field_begin(); + for (unsigned I = 0, N = LSI->Captures.size(); I != N; ++I, ++CurField) { LambdaScopeInfo::Capture From = LSI->Captures[I]; assert(!From.isBlockCapture() && "Cannot capture __block variables"); bool IsImplicit = I >= LSI->NumExplicitCaptures; @@ -1417,83 +1543,33 @@ ExprResult Sema::ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, CaptureInits.push_back(new (Context) CXXThisExpr(From.getLocation(), getCurrentThisType(), /*isImplicit=*/true)); + ArrayIndexStarts.push_back(ArrayIndexVars.size()); continue; } if (From.isVLATypeCapture()) { Captures.push_back( LambdaCapture(From.getLocation(), IsImplicit, LCK_VLAType)); CaptureInits.push_back(nullptr); + ArrayIndexStarts.push_back(ArrayIndexVars.size()); continue; } VarDecl *Var = From.getVariable(); - LambdaCaptureKind Kind = From.isCopyCapture()? LCK_ByCopy : LCK_ByRef; + LambdaCaptureKind Kind = From.isCopyCapture() ? LCK_ByCopy : LCK_ByRef; Captures.push_back(LambdaCapture(From.getLocation(), IsImplicit, Kind, Var, From.getEllipsisLoc())); - CaptureInits.push_back(From.getInitExpr()); - } - - switch (LSI->ImpCaptureStyle) { - case CapturingScopeInfo::ImpCap_None: - CaptureDefault = LCD_None; - break; - - case CapturingScopeInfo::ImpCap_LambdaByval: - CaptureDefault = LCD_ByCopy; - break; - - case CapturingScopeInfo::ImpCap_CapturedRegion: - case CapturingScopeInfo::ImpCap_LambdaByref: - CaptureDefault = LCD_ByRef; - break; - - case CapturingScopeInfo::ImpCap_Block: - llvm_unreachable("block capture in lambda"); - break; - } - CaptureDefaultLoc = LSI->CaptureDefaultLoc; - - // C++11 [expr.prim.lambda]p4: - // If a lambda-expression does not include a - // trailing-return-type, it is as if the trailing-return-type - // denotes the following type: - // - // Skip for C++1y return type deduction semantics which uses - // different machinery. - // FIXME: Refactor and Merge the return type deduction machinery. - // FIXME: Assumes current resolution to core issue 975. 
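The block being deleted here (the removal continues below) implemented the C++11 rule its comment quotes; C++14 return-type deduction goes through the general auto machinery instead, and the observable behavior is unchanged. A quick illustration of that rule, as a sketch:

    // deduce.cpp (illustrative) -- clang++ -std=c++11 -fsyntax-only deduce.cpp
    int main() {
      auto Nothing = [] {};                        // no return statement: deduced void
      auto Doubler = [](int X) { return X * 2; };  // single return: deduced int
      static_assert(sizeof(Doubler(0)) == sizeof(int),
                    "return type deduced from the returned expression");
      (void)Nothing;
      return 0;
    }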
- if (LSI->HasImplicitReturnType && !getLangOpts().CPlusPlus14) { - deduceClosureReturnType(*LSI); - - // - if there are no return statements in the - // compound-statement, or all return statements return - // either an expression of type void or no expression or - // braced-init-list, the type void; - if (LSI->ReturnType.isNull()) { - LSI->ReturnType = Context.VoidTy; + Expr *Init = From.getInitExpr(); + if (!Init) { + auto InitResult = performLambdaVarCaptureInitialization( + *this, From, *CurField, ArrayIndexVars, ArrayIndexStarts); + if (InitResult.isInvalid()) + return ExprError(); + Init = InitResult.get(); + } else { + ArrayIndexStarts.push_back(ArrayIndexVars.size()); } - - // Create a function type with the inferred return type. - const FunctionProtoType *Proto - = CallOperator->getType()->getAs<FunctionProtoType>(); - QualType FunctionTy = Context.getFunctionType( - LSI->ReturnType, Proto->getParamTypes(), Proto->getExtProtoInfo()); - CallOperator->setType(FunctionTy); + CaptureInits.push_back(Init); } - // C++ [expr.prim.lambda]p7: - // The lambda-expression's compound-statement yields the - // function-body (8.4) of the function call operator [...]. - ActOnFinishFunctionBody(CallOperator, Body, IsInstantiation); - CallOperator->setLexicalDeclContext(Class); - Decl *TemplateOrNonTemplateCallOperatorDecl = - CallOperator->getDescribedFunctionTemplate() - ? CallOperator->getDescribedFunctionTemplate() - : cast<Decl>(CallOperator); - - TemplateOrNonTemplateCallOperatorDecl->setLexicalDeclContext(Class); - Class->addDecl(TemplateOrNonTemplateCallOperatorDecl); - - PopExpressionEvaluationContext(); // C++11 [expr.prim.lambda]p6: // The closure type for a lambda-expression with no lambda-capture @@ -1529,7 +1605,7 @@ ExprResult Sema::ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Captures, ExplicitParams, ExplicitResultType, CaptureInits, ArrayIndexVars, - ArrayIndexStarts, Body->getLocEnd(), + ArrayIndexStarts, EndLoc, ContainsUnexpandedParameterPack); if (!CurContext->isDependentContext()) { diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaLookup.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaLookup.cpp index ad06872..c745b13 100644 --- a/contrib/llvm/tools/clang/lib/Sema/SemaLookup.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/SemaLookup.cpp @@ -13,6 +13,7 @@ //===----------------------------------------------------------------------===// #include "clang/Sema/Lookup.h" #include "clang/AST/ASTContext.h" +#include "clang/AST/ASTMutationListener.h" #include "clang/AST/CXXInheritance.h" #include "clang/AST/Decl.h" #include "clang/AST/DeclCXX.h" @@ -23,7 +24,9 @@ #include "clang/AST/ExprCXX.h" #include "clang/Basic/Builtins.h" #include "clang/Basic/LangOptions.h" +#include "clang/Lex/HeaderSearch.h" #include "clang/Lex/ModuleLoader.h" +#include "clang/Lex/Preprocessor.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/Overload.h" @@ -194,10 +197,11 @@ namespace { const_iterator begin() const { return list.begin(); } const_iterator end() const { return list.end(); } - std::pair<const_iterator,const_iterator> + llvm::iterator_range<const_iterator> getNamespacesFor(DeclContext *DC) const { - return std::equal_range(begin(), end(), DC->getPrimaryContext(), - UnqualUsingEntry::Comparator()); + return llvm::make_range(std::equal_range(begin(), end(), + DC->getPrimaryContext(), + UnqualUsingEntry::Comparator())); } }; } @@ -413,6 +417,10 @@ void LookupResult::resolveKind() { if (!Unique.insert(D).second) { // If it's not unique, pull 
something off the back (and // continue at this index). + // FIXME: This is wrong. We need to take the more recent declaration in + // order to get the right type, default arguments, etc. We also need to + // prefer visible declarations to hidden ones (for redeclaration lookup + // in modules builds). Decls[I] = Decls[--N]; continue; } @@ -670,8 +678,8 @@ static bool LookupDirect(Sema &S, LookupResult &R, const DeclContext *DC) { DeclareImplicitMemberFunctionsWithName(S, R.getLookupName(), DC); // Perform lookup into this declaration context. - DeclContext::lookup_const_result DR = DC->lookup(R.getLookupName()); - for (DeclContext::lookup_const_iterator I = DR.begin(), E = DR.end(); I != E; + DeclContext::lookup_result DR = DC->lookup(R.getLookupName()); + for (DeclContext::lookup_iterator I = DR.begin(), E = DR.end(); I != E; ++I) { NamedDecl *D = *I; if ((D = R.getAcceptableDecl(D))) { @@ -765,11 +773,8 @@ CppNamespaceLookup(Sema &S, LookupResult &R, ASTContext &Context, // Perform direct name lookup into the namespaces nominated by the // using directives whose common ancestor is this namespace. - UnqualUsingDirectiveSet::const_iterator UI, UEnd; - std::tie(UI, UEnd) = UDirs.getNamespacesFor(NS); - - for (; UI != UEnd; ++UI) - if (LookupDirect(S, R, UI->getNominatedNamespace())) + for (const UnqualUsingEntry &UUE : UDirs.getNamespacesFor(NS)) + if (LookupDirect(S, R, UUE.getNominatedNamespace())) Found = true; R.resolveKind(); @@ -1167,8 +1172,59 @@ static Decl *getInstantiatedFrom(Decl *D, MemberSpecializationInfo *MSInfo) { return MSInfo->isExplicitSpecialization() ? D : MSInfo->getInstantiatedFrom(); } +Module *Sema::getOwningModule(Decl *Entity) { + // If it's imported, grab its owning module. + Module *M = Entity->getImportedOwningModule(); + if (M || !isa<NamedDecl>(Entity) || !cast<NamedDecl>(Entity)->isHidden()) + return M; + assert(!Entity->isFromASTFile() && + "hidden entity from AST file has no owning module"); + + // It's local and hidden; grab or compute its owning module. + M = Entity->getLocalOwningModule(); + if (M) + return M; + + if (auto *Containing = + PP.getModuleContainingLocation(Entity->getLocation())) { + M = Containing; + } else if (Entity->isInvalidDecl() || Entity->getLocation().isInvalid()) { + // Don't bother tracking visibility for invalid declarations with broken + // locations. + cast<NamedDecl>(Entity)->setHidden(false); + } else { + // We need to assign a module to an entity that exists outside of any + // module, so that we can hide it from modules that we textually enter. + // Invent a fake module for all such entities. + if (!CachedFakeTopLevelModule) { + CachedFakeTopLevelModule = + PP.getHeaderSearchInfo().getModuleMap().findOrCreateModule( + "<top-level>", nullptr, false, false).first; + + auto &SrcMgr = PP.getSourceManager(); + SourceLocation StartLoc = + SrcMgr.getLocForStartOfFile(SrcMgr.getMainFileID()); + auto &TopLevel = + VisibleModulesStack.empty() ? VisibleModules : VisibleModulesStack[0]; + TopLevel.setVisible(CachedFakeTopLevelModule, StartLoc); + } + + M = CachedFakeTopLevelModule; + } + + if (M) + Entity->setLocalOwningModule(M); + return M; +} + +void Sema::makeMergedDefinitionVisible(NamedDecl *ND, SourceLocation Loc) { + auto *M = PP.getModuleContainingLocation(Loc); + assert(M && "hidden definition not in any module"); + Context.mergeDefinitionIntoModule(ND, M); +} + /// \brief Find the module in which the given declaration was defined. 
-static Module *getDefiningModule(Decl *Entity) { +static Module *getDefiningModule(Sema &S, Decl *Entity) { if (FunctionDecl *FD = dyn_cast<FunctionDecl>(Entity)) { // If this function was instantiated from a template, the defining module is // the module containing the pattern. @@ -1190,15 +1246,16 @@ static Module *getDefiningModule(Decl *Entity) { // from a template. DeclContext *Context = Entity->getDeclContext(); if (Context->isFileContext()) - return Entity->getOwningModule(); - return getDefiningModule(cast<Decl>(Context)); + return S.getOwningModule(Entity); + return getDefiningModule(S, cast<Decl>(Context)); } llvm::DenseSet<Module*> &Sema::getLookupModules() { unsigned N = ActiveTemplateInstantiations.size(); for (unsigned I = ActiveTemplateInstantiationLookupModules.size(); I != N; ++I) { - Module *M = getDefiningModule(ActiveTemplateInstantiations[I].Entity); + Module *M = + getDefiningModule(*this, ActiveTemplateInstantiations[I].Entity); if (M && !LookupModulesCache.insert(M).second) M = nullptr; ActiveTemplateInstantiationLookupModules.push_back(M); @@ -1206,6 +1263,13 @@ llvm::DenseSet<Module*> &Sema::getLookupModules() { return LookupModulesCache; } +bool Sema::hasVisibleMergedDefinition(NamedDecl *Def) { + for (Module *Merged : Context.getModulesWithMergedDefinition(Def)) + if (isModuleVisible(Merged)) + return true; + return false; +} + /// \brief Determine whether a declaration is visible to name lookup. /// /// This routine determines whether the declaration D is visible in the current @@ -1215,10 +1279,48 @@ llvm::DenseSet<Module*> &Sema::getLookupModules() { /// path (by instantiating a template, you allow it to see the declarations that /// your module can see, including those later on in your module). bool LookupResult::isVisibleSlow(Sema &SemaRef, NamedDecl *D) { - assert(D->isHidden() && !SemaRef.ActiveTemplateInstantiations.empty() && - "should not call this: not in slow case"); - Module *DeclModule = D->getOwningModule(); - assert(DeclModule && "hidden decl not from a module"); + assert(D->isHidden() && "should not call this: not in slow case"); + Module *DeclModule = SemaRef.getOwningModule(D); + if (!DeclModule) { + // getOwningModule() may have decided the declaration should not be hidden. + assert(!D->isHidden() && "hidden decl not from a module"); + return true; + } + + // If the owning module is visible, and the decl is not module private, + // then the decl is visible too. (Module private is ignored within the same + // top-level module.) + if (!D->isFromASTFile() || !D->isModulePrivate()) { + if (SemaRef.isModuleVisible(DeclModule)) + return true; + // Also check merged definitions. + if (SemaRef.getLangOpts().ModulesLocalVisibility && + SemaRef.hasVisibleMergedDefinition(D)) + return true; + } + + // If this declaration is not at namespace scope nor module-private, + // then it is visible if its lexical parent has a visible definition. + DeclContext *DC = D->getLexicalDeclContext(); + if (!D->isModulePrivate() && + DC && !DC->isFileContext() && !isa<LinkageSpecDecl>(DC)) { + // For a parameter, check whether our current template declaration's + // lexical context is visible, not whether there's some other visible + // definition of it, because parameters aren't "within" the definition. + if ((D->isTemplateParameter() || isa<ParmVarDecl>(D)) + ? isVisible(SemaRef, cast<NamedDecl>(DC)) + : SemaRef.hasVisibleDefinition(cast<NamedDecl>(DC))) { + if (SemaRef.ActiveTemplateInstantiations.empty() && + // FIXME: Do something better in this case. 
+ !SemaRef.getLangOpts().ModulesLocalVisibility) { + // Cache the fact that this declaration is implicitly visible because + // its parent has a visible definition. + D->setHidden(false); + } + return true; + } + return false; + } // Find the extra places where we need to look. llvm::DenseSet<Module*> &LookupModules = SemaRef.getLookupModules(); @@ -1243,6 +1345,10 @@ bool LookupResult::isVisibleSlow(Sema &SemaRef, NamedDecl *D) { return false; } +bool Sema::isVisibleSlow(const NamedDecl *D) { + return LookupResult::isVisible(*this, const_cast<NamedDecl*>(D)); +} + /// \brief Retrieve the visible declaration corresponding to D, if any. /// /// This routine determines whether the declaration D is visible in the current @@ -1256,6 +1362,9 @@ static NamedDecl *findAcceptableDecl(Sema &SemaRef, NamedDecl *D) { for (auto RD : D->redecls()) { if (auto ND = dyn_cast<NamedDecl>(RD)) { + // FIXME: This is wrong in the case where the previous declaration is not + // visible in the same scope as D. This needs to be done much more + // carefully. if (LookupResult::isVisible(SemaRef, ND)) return ND; } @@ -3016,17 +3125,45 @@ static void LookupVisibleDecls(DeclContext *Ctx, LookupResult &Result, if (Visited.visitedContext(Ctx->getPrimaryContext())) return; + // Outside C++, lookup results for the TU live on identifiers. + if (isa<TranslationUnitDecl>(Ctx) && + !Result.getSema().getLangOpts().CPlusPlus) { + auto &S = Result.getSema(); + auto &Idents = S.Context.Idents; + + // Ensure all external identifiers are in the identifier table. + if (IdentifierInfoLookup *External = Idents.getExternalIdentifierLookup()) { + std::unique_ptr<IdentifierIterator> Iter(External->getIdentifiers()); + for (StringRef Name = Iter->Next(); !Name.empty(); Name = Iter->Next()) + Idents.get(Name); + } + + // Walk all lookup results in the TU for each identifier. + for (const auto &Ident : Idents) { + for (auto I = S.IdResolver.begin(Ident.getValue()), + E = S.IdResolver.end(); + I != E; ++I) { + if (S.IdResolver.isDeclInScope(*I, Ctx)) { + if (NamedDecl *ND = Result.getAcceptableDecl(*I)) { + Consumer.FoundDecl(ND, Visited.checkHidden(ND), Ctx, InBaseClass); + Visited.add(ND); + } + } + } + } + + return; + } + if (CXXRecordDecl *Class = dyn_cast<CXXRecordDecl>(Ctx)) Result.getSema().ForceDeclarationOfImplicitMembers(Class); // Enumerate all of the results in this context. - for (const auto &R : Ctx->lookups()) { - for (auto *I : R) { - if (NamedDecl *ND = dyn_cast<NamedDecl>(I)) { - if ((ND = Result.getAcceptableDecl(ND))) { - Consumer.FoundDecl(ND, Visited.checkHidden(ND), Ctx, InBaseClass); - Visited.add(ND); - } + for (DeclContextLookupResult R : Ctx->lookups()) { + for (auto *D : R) { + if (auto *ND = Result.getAcceptableDecl(D)) { + Consumer.FoundDecl(ND, Visited.checkHidden(ND), Ctx, InBaseClass); + Visited.add(ND); } } } @@ -3209,10 +3346,8 @@ static void LookupVisibleDecls(Scope *S, LookupResult &Result, if (Entity) { // Lookup visible declarations in any namespaces found by using // directives. 
- UnqualUsingDirectiveSet::const_iterator UI, UEnd; - std::tie(UI, UEnd) = UDirs.getNamespacesFor(Entity); - for (; UI != UEnd; ++UI) - LookupVisibleDecls(const_cast<DeclContext *>(UI->getNominatedNamespace()), + for (const UnqualUsingEntry &UUE : UDirs.getNamespacesFor(Entity)) + LookupVisibleDecls(const_cast<DeclContext *>(UUE.getNominatedNamespace()), Result, /*QualifiedNameLookup=*/false, /*InBaseClass=*/false, Consumer, Visited); } @@ -3682,8 +3817,7 @@ void TypoCorrectionConsumer::performQualifiedLookups() { TypoCorrectionConsumer::NamespaceSpecifierSet::NamespaceSpecifierSet( ASTContext &Context, DeclContext *CurContext, CXXScopeSpec *CurScopeSpec) - : Context(Context), CurContextChain(buildContextChain(CurContext)), - isSorted(false) { + : Context(Context), CurContextChain(buildContextChain(CurContext)) { if (NestedNameSpecifier *NNS = CurScopeSpec ? CurScopeSpec->getScopeRep() : nullptr) { llvm::raw_string_ostream SpecifierOStream(CurNameSpecifier); @@ -3702,7 +3836,6 @@ TypoCorrectionConsumer::NamespaceSpecifierSet::NamespaceSpecifierSet( } // Add the global context as a NestedNameSpecifier - Distances.insert(1); SpecifierInfo SI = {cast<DeclContext>(Context.getTranslationUnitDecl()), NestedNameSpecifier::GlobalSpecifier(Context), 1}; DistanceMap[1].push_back(SI); @@ -3722,22 +3855,6 @@ auto TypoCorrectionConsumer::NamespaceSpecifierSet::buildContextChain( return Chain; } -void TypoCorrectionConsumer::NamespaceSpecifierSet::sortNamespaces() { - SmallVector<unsigned, 4> sortedDistances; - sortedDistances.append(Distances.begin(), Distances.end()); - - if (sortedDistances.size() > 1) - std::sort(sortedDistances.begin(), sortedDistances.end()); - - Specifiers.clear(); - for (auto D : sortedDistances) { - SpecifierInfoList &SpecList = DistanceMap[D]; - Specifiers.append(SpecList.begin(), SpecList.end()); - } - - isSorted = true; -} - unsigned TypoCorrectionConsumer::NamespaceSpecifierSet::buildNestedNameSpecifier( DeclContextList &DeclChain, NestedNameSpecifier *&NNS) { @@ -3818,8 +3935,6 @@ void TypoCorrectionConsumer::NamespaceSpecifierSet::addNameSpecifier( llvm::makeArrayRef(NewNameSpecifierIdentifiers)); } - isSorted = false; - Distances.insert(NumSpecifiers); SpecifierInfo SI = {Ctx, NNS, NumSpecifiers}; DistanceMap[NumSpecifiers].push_back(SI); } @@ -4294,7 +4409,7 @@ TypoCorrection Sema::CorrectTypo(const DeclarationNameInfo &TypoName, // Record the failure's location if needed and return an empty correction. If // this was an unqualified lookup and we believe the callback object did not // filter out possible corrections, also cache the failure for the typo. - return FailedCorrection(Typo, TypoName.getLoc(), RecordFailure); + return FailedCorrection(Typo, TypoName.getLoc(), RecordFailure && !SecondBestTC); } /// \brief Try to "correct" a typo in the source code by finding @@ -4347,9 +4462,7 @@ TypoExpr *Sema::CorrectTypoDelayed( TypoCorrection Empty; auto Consumer = makeTypoCorrectionConsumer( TypoName, LookupKind, S, SS, std::move(CCC), MemberContext, - EnteringContext, OPT, - /*SearchModules=*/(Mode == CTK_ErrorRecovery) && getLangOpts().Modules && - getLangOpts().ModulesSearchAll); + EnteringContext, OPT, Mode == CTK_ErrorRecovery); if (!Consumer || Consumer->empty()) return nullptr; @@ -4491,18 +4604,18 @@ void Sema::diagnoseTypo(const TypoCorrection &Correction, /// Find which declaration we should import to provide the definition of /// the given declaration. 
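The typo-correction plumbing touched above feeds the familiar "did you mean" diagnostics. A trivial reproducer, for orientation (nothing here is from the patch, and the misspelling is deliberate):

    // typo.cpp (illustrative) -- clang -fsyntax-only typo.cpp
    int main(void) {
      unsinged Count = 0;  // error: unknown type name 'unsinged'; did you mean 'unsigned'?
      return (int)Count;
    }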
-static const NamedDecl *getDefinitionToImport(const NamedDecl *D) { - if (const VarDecl *VD = dyn_cast<VarDecl>(D)) +static NamedDecl *getDefinitionToImport(NamedDecl *D) { + if (VarDecl *VD = dyn_cast<VarDecl>(D)) return VD->getDefinition(); if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) - return FD->isDefined(FD) ? FD : nullptr; - if (const TagDecl *TD = dyn_cast<TagDecl>(D)) + return FD->isDefined(FD) ? const_cast<FunctionDecl*>(FD) : nullptr; + if (TagDecl *TD = dyn_cast<TagDecl>(D)) return TD->getDefinition(); - if (const ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(D)) + if (ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(D)) return ID->getDefinition(); - if (const ObjCProtocolDecl *PD = dyn_cast<ObjCProtocolDecl>(D)) + if (ObjCProtocolDecl *PD = dyn_cast<ObjCProtocolDecl>(D)) return PD->getDefinition(); - if (const TemplateDecl *TD = dyn_cast<TemplateDecl>(D)) + if (TemplateDecl *TD = dyn_cast<TemplateDecl>(D)) return getDefinitionToImport(TD->getTemplatedDecl()); return nullptr; } @@ -4535,10 +4648,10 @@ void Sema::diagnoseTypo(const TypoCorrection &Correction, // Suggest importing a module providing the definition of this entity, if // possible. - const NamedDecl *Def = getDefinitionToImport(Decl); + NamedDecl *Def = getDefinitionToImport(Decl); if (!Def) Def = Decl; - Module *Owner = Def->getOwningModule(); + Module *Owner = getOwningModule(Def); assert(Owner && "definition of hidden declaration is not in a module"); Diag(Correction.getCorrectionRange().getBegin(), diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaObjCProperty.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaObjCProperty.cpp index 72b6020..5e7b4b8 100644 --- a/contrib/llvm/tools/clang/lib/Sema/SemaObjCProperty.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/SemaObjCProperty.cpp @@ -19,6 +19,7 @@ #include "clang/AST/ExprObjC.h" #include "clang/Basic/SourceManager.h" #include "clang/Lex/Lexer.h" +#include "clang/Lex/Preprocessor.h" #include "clang/Sema/Initialization.h" #include "llvm/ADT/DenseSet.h" #include "llvm/ADT/SmallString.h" @@ -406,9 +407,11 @@ Sema::HandlePropertyInClassExtension(Scope *S, // this conversion is safe only because the wider type is for a 'readonly' // property in primary class and 'narrowed' type for a 'readwrite' property // in continuation class. - if (!isa<ObjCObjectPointerType>(PIDecl->getType()) || - !isa<ObjCObjectPointerType>(PDecl->getType()) || - (!isObjCPointerConversion(PDecl->getType(), PIDecl->getType(), + QualType PrimaryClassPropertyT = Context.getCanonicalType(PIDecl->getType()); + QualType ClassExtPropertyT = Context.getCanonicalType(PDecl->getType()); + if (!isa<ObjCObjectPointerType>(PrimaryClassPropertyT) || + !isa<ObjCObjectPointerType>(ClassExtPropertyT) || + (!isObjCPointerConversion(ClassExtPropertyT, PrimaryClassPropertyT, ConvertedType, IncompatibleObjC)) || IncompatibleObjC) { Diag(AtLoc, @@ -1854,6 +1857,39 @@ void Sema::DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D Diag(PD->getLocation(), diag::err_cocoa_naming_owned_rule); else Diag(PD->getLocation(), diag::warn_cocoa_naming_owned_rule); + + // Look for a getter explicitly declared alongside the property. + // If we find one, use its location for the note. 
+ SourceLocation noteLoc = PD->getLocation(); + SourceLocation fixItLoc; + for (auto *getterRedecl : method->redecls()) { + if (getterRedecl->isImplicit()) + continue; + if (getterRedecl->getDeclContext() != PD->getDeclContext()) + continue; + noteLoc = getterRedecl->getLocation(); + fixItLoc = getterRedecl->getLocEnd(); + } + + Preprocessor &PP = getPreprocessor(); + TokenValue tokens[] = { + tok::kw___attribute, tok::l_paren, tok::l_paren, + PP.getIdentifierInfo("objc_method_family"), tok::l_paren, + PP.getIdentifierInfo("none"), tok::r_paren, + tok::r_paren, tok::r_paren + }; + StringRef spelling = "__attribute__((objc_method_family(none)))"; + StringRef macroName = PP.getLastMacroWithSpelling(noteLoc, tokens); + if (!macroName.empty()) + spelling = macroName; + + auto noteDiag = Diag(noteLoc, diag::note_cocoa_naming_declare_family) + << method->getDeclName() << spelling; + if (fixItLoc.isValid()) { + SmallString<64> fixItText(" "); + fixItText += spelling; + noteDiag << FixItHint::CreateInsertion(fixItLoc, fixItText); + } } } } diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaOpenMP.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaOpenMP.cpp index d72942a..cfe8db3 100644 --- a/contrib/llvm/tools/clang/lib/Sema/SemaOpenMP.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/SemaOpenMP.cpp @@ -82,10 +82,12 @@ private: }; typedef llvm::SmallDenseMap<VarDecl *, DSAInfo, 64> DeclSAMapTy; typedef llvm::SmallDenseMap<VarDecl *, DeclRefExpr *, 64> AlignedMapTy; + typedef llvm::DenseSet<VarDecl *> LoopControlVariablesSetTy; struct SharingMapTy { DeclSAMapTy SharingMap; AlignedMapTy AlignedMap; + LoopControlVariablesSetTy LCVSet; DefaultDataSharingAttributes DefaultAttr; SourceLocation DefaultAttrLoc; OpenMPDirectiveKind Directive; @@ -93,22 +95,28 @@ private: Scope *CurScope; SourceLocation ConstructLoc; bool OrderedRegion; + unsigned CollapseNumber; SourceLocation InnerTeamsRegionLoc; SharingMapTy(OpenMPDirectiveKind DKind, DeclarationNameInfo Name, Scope *CurScope, SourceLocation Loc) - : SharingMap(), AlignedMap(), DefaultAttr(DSA_unspecified), + : SharingMap(), AlignedMap(), LCVSet(), DefaultAttr(DSA_unspecified), Directive(DKind), DirectiveName(std::move(Name)), CurScope(CurScope), - ConstructLoc(Loc), OrderedRegion(false), InnerTeamsRegionLoc() {} + ConstructLoc(Loc), OrderedRegion(false), CollapseNumber(1), + InnerTeamsRegionLoc() {} SharingMapTy() - : SharingMap(), AlignedMap(), DefaultAttr(DSA_unspecified), + : SharingMap(), AlignedMap(), LCVSet(), DefaultAttr(DSA_unspecified), Directive(OMPD_unknown), DirectiveName(), CurScope(nullptr), - ConstructLoc(), OrderedRegion(false), InnerTeamsRegionLoc() {} + ConstructLoc(), OrderedRegion(false), CollapseNumber(1), + InnerTeamsRegionLoc() {} }; typedef SmallVector<SharingMapTy, 64> StackTy; /// \brief Stack of used declarations and their data-sharing attributes. StackTy Stack; + /// \brief True if the check for DSA must be performed on the parent + /// directive, false if on the current directive.
+ bool FromParent; Sema &SemaRef; typedef SmallVector<SharingMapTy, 8>::reverse_iterator reverse_iterator; @@ -119,7 +127,10 @@ private: bool isOpenMPLocal(VarDecl *D, StackTy::reverse_iterator Iter); public: - explicit DSAStackTy(Sema &S) : Stack(1), SemaRef(S) {} + explicit DSAStackTy(Sema &S) : Stack(1), FromParent(false), SemaRef(S) {} + + bool isFromParent() const { return FromParent; } + void setFromParent(bool Flag) { FromParent = Flag; } void push(OpenMPDirectiveKind DKind, const DeclarationNameInfo &DirName, Scope *CurScope, SourceLocation Loc) { @@ -137,6 +148,12 @@ public: /// for diagnostics. DeclRefExpr *addUniqueAligned(VarDecl *D, DeclRefExpr *NewDE); + /// \brief Register specified variable as loop control variable. + void addLoopControlVariable(VarDecl *D); + /// \brief Check if the specified variable is a loop control variable for + /// current region. + bool isLoopControlVariable(VarDecl *D); + /// \brief Adds explicit data sharing attribute to the specified declaration. void addDSA(VarDecl *D, DeclRefExpr *E, OpenMPClauseKind A); @@ -209,6 +226,13 @@ public: return false; } + /// \brief Set collapse value for the region. + void setCollapseNumber(unsigned Val) { Stack.back().CollapseNumber = Val; } + /// \brief Return collapse value for region. + unsigned getCollapseNumber() const { + return Stack.back().CollapseNumber; + } + /// \brief Marks current target region as one with closely nested teams /// region. void setParentTeamsRegionLoc(SourceLocation TeamsRegionLoc) { @@ -238,6 +262,7 @@ bool isParallelOrTaskRegion(OpenMPDirectiveKind DKind) { DSAStackTy::DSAVarData DSAStackTy::getDSA(StackTy::reverse_iterator Iter, VarDecl *D) { + D = D->getCanonicalDecl(); DSAVarData DVar; if (Iter == std::prev(Stack.rend())) { // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced @@ -308,8 +333,7 @@ DSAStackTy::DSAVarData DSAStackTy::getDSA(StackTy::reverse_iterator Iter, // bound to the current team is shared. 
if (DVar.DKind == OMPD_task) { DSAVarData DVarTemp; - for (StackTy::reverse_iterator I = std::next(Iter), - EE = std::prev(Stack.rend()); + for (StackTy::reverse_iterator I = std::next(Iter), EE = Stack.rend(); I != EE; ++I) { // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables // Referenced @@ -343,6 +367,7 @@ DSAStackTy::DSAVarData DSAStackTy::getDSA(StackTy::reverse_iterator Iter, DeclRefExpr *DSAStackTy::addUniqueAligned(VarDecl *D, DeclRefExpr *NewDE) { assert(Stack.size() > 1 && "Data sharing attributes stack is empty"); + D = D->getCanonicalDecl(); auto It = Stack.back().AlignedMap.find(D); if (It == Stack.back().AlignedMap.end()) { assert(NewDE && "Unexpected nullptr expr to be added into aligned map"); @@ -355,7 +380,20 @@ DeclRefExpr *DSAStackTy::addUniqueAligned(VarDecl *D, DeclRefExpr *NewDE) { return nullptr; } +void DSAStackTy::addLoopControlVariable(VarDecl *D) { + assert(Stack.size() > 1 && "Data-sharing attributes stack is empty"); + D = D->getCanonicalDecl(); + Stack.back().LCVSet.insert(D); +} + +bool DSAStackTy::isLoopControlVariable(VarDecl *D) { + assert(Stack.size() > 1 && "Data-sharing attributes stack is empty"); + D = D->getCanonicalDecl(); + return Stack.back().LCVSet.count(D) > 0; +} + void DSAStackTy::addDSA(VarDecl *D, DeclRefExpr *E, OpenMPClauseKind A) { + D = D->getCanonicalDecl(); if (A == OMPC_threadprivate) { Stack[0].SharingMap[D].Attributes = A; Stack[0].SharingMap[D].RefExpr = E; @@ -367,6 +405,7 @@ void DSAStackTy::addDSA(VarDecl *D, DeclRefExpr *E, OpenMPClauseKind A) { } bool DSAStackTy::isOpenMPLocal(VarDecl *D, StackTy::reverse_iterator Iter) { + D = D->getCanonicalDecl(); if (Stack.size() > 2) { reverse_iterator I = Iter, E = std::prev(Stack.rend()); Scope *TopScope = nullptr; @@ -385,16 +424,41 @@ bool DSAStackTy::isOpenMPLocal(VarDecl *D, StackTy::reverse_iterator Iter) { return false; } +/// \brief Build a variable declaration for OpenMP loop iteration variable. +static VarDecl *buildVarDecl(Sema &SemaRef, SourceLocation Loc, QualType Type, + StringRef Name) { + DeclContext *DC = SemaRef.CurContext; + IdentifierInfo *II = &SemaRef.PP.getIdentifierTable().get(Name); + TypeSourceInfo *TInfo = SemaRef.Context.getTrivialTypeSourceInfo(Type, Loc); + VarDecl *Decl = + VarDecl::Create(SemaRef.Context, DC, Loc, Loc, II, Type, TInfo, SC_None); + Decl->setImplicit(); + return Decl; +} + +static DeclRefExpr *buildDeclRefExpr(Sema &S, VarDecl *D, QualType Ty, + SourceLocation Loc, + bool RefersToCapture = false) { + D->setReferenced(); + D->markUsed(S.Context); + return DeclRefExpr::Create(S.getASTContext(), NestedNameSpecifierLoc(), + SourceLocation(), D, RefersToCapture, Loc, Ty, + VK_LValue); +} + DSAStackTy::DSAVarData DSAStackTy::getTopDSA(VarDecl *D, bool FromParent) { + D = D->getCanonicalDecl(); DSAVarData DVar; // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced // in a Construct, C/C++, predetermined, p.1] // Variables appearing in threadprivate directives are threadprivate. 
if (D->getTLSKind() != VarDecl::TLS_None || - D->getStorageClass() == SC_Register) { - DVar.CKind = OMPC_threadprivate; - return DVar; + (D->getStorageClass() == SC_Register && D->hasAttr<AsmLabelAttr>() && + !D->isLocalVarDecl())) { + addDSA(D, buildDeclRefExpr(SemaRef, D, D->getType().getNonReferenceType(), + D->getLocation()), + OMPC_threadprivate); } if (Stack[0].SharingMap.count(D)) { DVar.RefExpr = Stack[0].SharingMap[D].RefExpr; @@ -421,29 +485,28 @@ DSAStackTy::DSAVarData DSAStackTy::getTopDSA(VarDecl *D, bool FromParent) { DVar.CKind = OMPC_private; return DVar; } - } - // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced - // in a Construct, C/C++, predetermined, p.4] - // Static data members are shared. - if (D->isStaticDataMember()) { - // Variables with const-qualified type having no mutable member may be - // listed in a firstprivate clause, even if they are static data members. - DSAVarData DVarTemp = hasDSA(D, MatchesAnyClause(OMPC_firstprivate), - MatchesAlways(), FromParent); - if (DVarTemp.CKind == OMPC_firstprivate && DVarTemp.RefExpr) - return DVar; + // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced + // in a Construct, C/C++, predetermined, p.4] + // Static data members are shared. + // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced + // in a Construct, C/C++, predetermined, p.7] + // Variables with static storage duration that are declared in a scope + // inside the construct are shared. + if (D->isStaticDataMember() || D->isStaticLocal()) { + DSAVarData DVarTemp = + hasDSA(D, isOpenMPPrivate, MatchesAlways(), FromParent); + if (DVarTemp.CKind != OMPC_unknown && DVarTemp.RefExpr) + return DVar; - DVar.CKind = OMPC_shared; - return DVar; + DVar.CKind = OMPC_shared; + return DVar; + } } QualType Type = D->getType().getNonReferenceType().getCanonicalType(); bool IsConstant = Type.isConstant(SemaRef.getASTContext()); - while (Type->isArrayType()) { - QualType ElemType = cast<ArrayType>(Type.getTypePtr())->getElementType(); - Type = ElemType.getNonReferenceType().getCanonicalType(); - } + Type = SemaRef.getASTContext().getBaseElementType(Type); // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced // in a Construct, C/C++, predetermined, p.6] // Variables with const qualified type having no mutable member are @@ -463,15 +526,6 @@ DSAStackTy::DSAVarData DSAStackTy::getTopDSA(VarDecl *D, bool FromParent) { return DVar; } - // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced - // in a Construct, C/C++, predetermined, p.7] - // Variables with static storage duration that are declared in a scope - // inside the construct are shared. - if (D->isStaticLocal()) { - DVar.CKind = OMPC_shared; - return DVar; - } - // Explicitly specified attributes and local variables with predetermined // attributes. 
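The predetermined data-sharing rules being consolidated above are observable in user code. A small sketch, assuming -fopenmp (identifiers arbitrary): the const local with no mutable members is predetermined shared yet may still appear in a firstprivate clause, and the static local declared inside the construct is shared:

    // dsa.cpp (illustrative) -- clang++ -fopenmp -fsyntax-only dsa.cpp
    int work(int N) {
      const int Scale = 2;        // const, no mutable member: predetermined shared
    #pragma omp parallel firstprivate(Scale)
      {
        static int Generation;    // static storage duration in the construct: shared
        int Local = Scale * N;    // automatic local of the region: private per thread
        (void)Local;
        (void)Generation;
      }
      return N;
    }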
auto I = std::prev(StartI); @@ -485,6 +539,7 @@ DSAStackTy::DSAVarData DSAStackTy::getTopDSA(VarDecl *D, bool FromParent) { } DSAStackTy::DSAVarData DSAStackTy::getImplicitDSA(VarDecl *D, bool FromParent) { + D = D->getCanonicalDecl(); auto StartI = Stack.rbegin(); auto EndI = std::prev(Stack.rend()); if (FromParent && StartI != EndI) { @@ -497,6 +552,7 @@ template <class ClausesPredicate, class DirectivesPredicate> DSAStackTy::DSAVarData DSAStackTy::hasDSA(VarDecl *D, ClausesPredicate CPred, DirectivesPredicate DPred, bool FromParent) { + D = D->getCanonicalDecl(); auto StartI = std::next(Stack.rbegin()); auto EndI = std::prev(Stack.rend()); if (FromParent && StartI != EndI) { @@ -516,6 +572,7 @@ template <class ClausesPredicate, class DirectivesPredicate> DSAStackTy::DSAVarData DSAStackTy::hasInnermostDSA(VarDecl *D, ClausesPredicate CPred, DirectivesPredicate DPred, bool FromParent) { + D = D->getCanonicalDecl(); auto StartI = std::next(Stack.rbegin()); auto EndI = std::prev(Stack.rend()); if (FromParent && StartI != EndI) { @@ -554,12 +611,17 @@ void Sema::InitDataSharingAttributesStack() { bool Sema::IsOpenMPCapturedVar(VarDecl *VD) { assert(LangOpts.OpenMP && "OpenMP is not allowed"); + VD = VD->getCanonicalDecl(); if (DSAStack->getCurrentDirective() != OMPD_unknown) { - auto DVarPrivate = DSAStack->getTopDSA(VD, /*FromParent=*/false); + if (DSAStack->isLoopControlVariable(VD) || + (VD->hasLocalStorage() && + isParallelOrTaskRegion(DSAStack->getCurrentDirective()))) + return true; + auto DVarPrivate = DSAStack->getTopDSA(VD, DSAStack->isFromParent()); if (DVarPrivate.CKind != OMPC_unknown && isOpenMPPrivate(DVarPrivate.CKind)) return true; DVarPrivate = DSAStack->hasDSA(VD, isOpenMPPrivate, MatchesAlways(), - /*FromParent=*/false); + DSAStack->isFromParent()); return DVarPrivate.CKind != OMPC_unknown; } return false; @@ -574,6 +636,14 @@ void Sema::StartOpenMPDSABlock(OpenMPDirectiveKind DKind, PushExpressionEvaluationContext(PotentiallyEvaluated); } +void Sema::StartOpenMPClauses() { + DSAStack->setFromParent(/*Flag=*/true); +} + +void Sema::EndOpenMPClauses() { + DSAStack->setFromParent(/*Flag=*/false); +} + void Sema::EndOpenMPDSABlock(Stmt *CurDirective) { // OpenMP [2.14.3.5, Restrictions, C/C++, p.1] // A variable of class type (or array thereof) that appears in a lastprivate @@ -581,46 +651,41 @@ void Sema::EndOpenMPDSABlock(Stmt *CurDirective) { // class type, unless the list item is also specified in a firstprivate // clause. if (auto D = dyn_cast_or_null<OMPExecutableDirective>(CurDirective)) { - for (auto C : D->clauses()) { - if (auto Clause = dyn_cast<OMPLastprivateClause>(C)) { - for (auto VarRef : Clause->varlists()) { - if (VarRef->isValueDependent() || VarRef->isTypeDependent()) + for (auto *C : D->clauses()) { + if (auto *Clause = dyn_cast<OMPLastprivateClause>(C)) { + SmallVector<Expr *, 8> PrivateCopies; + for (auto *DE : Clause->varlists()) { + if (DE->isValueDependent() || DE->isTypeDependent()) { + PrivateCopies.push_back(nullptr); continue; - auto VD = cast<VarDecl>(cast<DeclRefExpr>(VarRef)->getDecl()); + } + auto *VD = cast<VarDecl>(cast<DeclRefExpr>(DE)->getDecl()); + QualType Type = VD->getType(); auto DVar = DSAStack->getTopDSA(VD, false); if (DVar.CKind == OMPC_lastprivate) { - SourceLocation ELoc = VarRef->getExprLoc(); - auto Type = VarRef->getType(); - if (Type->isArrayType()) - Type = QualType(Type->getArrayElementTypeNoTypeQual(), 0); - CXXRecordDecl *RD = - getLangOpts().CPlusPlus ? 
Type->getAsCXXRecordDecl() : nullptr; - // FIXME This code must be replaced by actual constructing of the - // lastprivate variable. - if (RD) { - CXXConstructorDecl *CD = LookupDefaultConstructor(RD); - PartialDiagnostic PD = - PartialDiagnostic(PartialDiagnostic::NullDiagnostic()); - if (!CD || - CheckConstructorAccess( - ELoc, CD, InitializedEntity::InitializeTemporary(Type), - CD->getAccess(), PD) == AR_inaccessible || - CD->isDeleted()) { - Diag(ELoc, diag::err_omp_required_method) - << getOpenMPClauseName(OMPC_lastprivate) << 0; - bool IsDecl = VD->isThisDeclarationADefinition(Context) == - VarDecl::DeclarationOnly; - Diag(VD->getLocation(), IsDecl ? diag::note_previous_decl - : diag::note_defined_here) - << VD; - Diag(RD->getLocation(), diag::note_previous_decl) << RD; - continue; - } - MarkFunctionReferenced(ELoc, CD); - DiagnoseUseOfDecl(CD, ELoc); - } + // Generate helper private variable and initialize it with the + // default value. The address of the original variable is replaced + // by the address of the new private variable in CodeGen. This new + // variable is not added to IdResolver, so the code in the OpenMP + // region uses original variable for proper diagnostics. + auto *VDPrivate = + buildVarDecl(*this, DE->getExprLoc(), Type.getUnqualifiedType(), + VD->getName()); + ActOnUninitializedDecl(VDPrivate, /*TypeMayContainAuto=*/false); + if (VDPrivate->isInvalidDecl()) + continue; + PrivateCopies.push_back(buildDeclRefExpr( + *this, VDPrivate, DE->getType(), DE->getExprLoc())); + } else { + // The variable is also a firstprivate, so initialization sequence + // for private copy is generated already. + PrivateCopies.push_back(nullptr); } } + // Set initializers to private copies if no errors were found. + if (PrivateCopies.size() == Clause->varlist_size()) { + Clause->setPrivateCopies(PrivateCopies); + } } } } @@ -630,6 +695,10 @@ void Sema::EndOpenMPDSABlock(Stmt *CurDirective) { PopExpressionEvaluationContext(); } +static bool FinishOpenMPLinearClause(OMPLinearClause &Clause, DeclRefExpr *IV, + Expr *NumIterations, Sema &SemaRef, + Scope *S); + namespace { class VarDeclFilterCCC : public CorrectionCandidateCallback { @@ -763,14 +832,14 @@ ExprResult Sema::ActOnOpenMPIdExpression(Scope *CurScope, // OpenMP [2.9.2, Restrictions, C/C++, p.2-6] // A threadprivate directive must lexically precede all references to any // of the variables in its list. - if (VD->isUsed()) { + if (VD->isUsed() && !DSAStack->isThreadPrivate(VD)) { Diag(Id.getLoc(), diag::err_omp_var_used) << getOpenMPDirectiveName(OMPD_threadprivate) << VD; return ExprError(); } QualType ExprType = VD->getType().getNonReferenceType(); - ExprResult DE = BuildDeclRefExpr(VD, ExprType, VK_LValue, Id.getLoc()); + ExprResult DE = buildDeclRefExpr(*this, VD, ExprType, Id.getLoc()); return DE; } @@ -821,6 +890,13 @@ Sema::CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList) { VarDecl *VD = cast<VarDecl>(DE->getDecl()); SourceLocation ILoc = DE->getExprLoc(); + QualType QType = VD->getType(); + if (QType->isDependentType() || QType->isInstantiationDependentType()) { + // It will be analyzed later. + Vars.push_back(DE); + continue; + } + // OpenMP [2.9.2, Restrictions, C/C++, p.10] // A threadprivate variable must not have an incomplete type. if (RequireCompleteType(ILoc, VD->getType(), @@ -843,7 +919,8 @@ Sema::CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList) { // Check if this is a TLS variable. 
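The rewrite above replaces the old ad-hoc default-constructor check with an actual private copy (VDPrivate) whose initialization Sema now builds eagerly; CodeGen later substitutes the copy's address for the original variable. From the user's side the contract is unchanged: a class-type lastprivate item still needs an accessible default constructor, and the sequentially last value is written back. An illustrative sketch, assuming -fopenmp:

    // lastprivate.cpp (illustrative) -- clang++ -fopenmp lastprivate.cpp
    struct Counter {
      int Value;
      Counter() : Value(0) {}   // accessible default constructor, as required
    };

    int main() {
      Counter C;
    #pragma omp parallel for lastprivate(C)
      for (int I = 0; I < 16; ++I)
        C.Value = I;            // each thread updates its own private copy
      // The copy from the sequentially last iteration (I == 15) is written back.
      return C.Value == 15 ? 0 : 1;
    }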
if (VD->getTLSKind() != VarDecl::TLS_None || - VD->getStorageClass() == SC_Register) { + (VD->getStorageClass() == SC_Register && VD->hasAttr<AsmLabelAttr>() && + !VD->isLocalVarDecl())) { Diag(ILoc, diag::err_omp_var_thread_local) << VD << ((VD->getTLSKind() != VarDecl::TLS_None) ? 0 : 1); bool IsDecl = @@ -1121,7 +1198,11 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) { break; } case OMPD_parallel_sections: { + QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1); + QualType KmpInt32PtrTy = Context.getPointerType(KmpInt32Ty); Sema::CapturedParamNameType Params[] = { + std::make_pair(".global_tid.", KmpInt32PtrTy), + std::make_pair(".bound_tid.", KmpInt32PtrTy), std::make_pair(StringRef(), QualType()) // __context with shared vars }; ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP, @@ -1129,11 +1210,28 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) { break; } case OMPD_task: { + QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1); + QualType Args[] = {Context.VoidPtrTy.withConst().withRestrict()}; + FunctionProtoType::ExtProtoInfo EPI; + EPI.Variadic = true; + QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI); Sema::CapturedParamNameType Params[] = { + std::make_pair(".global_tid.", KmpInt32Ty), + std::make_pair(".part_id.", KmpInt32Ty), + std::make_pair(".privates.", + Context.VoidPtrTy.withConst().withRestrict()), + std::make_pair( + ".copy_fn.", + Context.getPointerType(CopyFnType).withConst().withRestrict()), std::make_pair(StringRef(), QualType()) // __context with shared vars }; ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP, Params); + // Mark this captured region as inlined, because we don't use outlined + // function directly. + getCurCapturedRegion()->TheCapturedDecl->addAttr( + AlwaysInlineAttr::CreateImplicit( + Context, AlwaysInlineAttr::Keyword_forceinline, SourceRange())); break; } case OMPD_ordered: { @@ -1183,6 +1281,36 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) { } } +StmtResult Sema::ActOnOpenMPRegionEnd(StmtResult S, + ArrayRef<OMPClause *> Clauses) { + if (!S.isUsable()) { + ActOnCapturedRegionError(); + return StmtError(); + } + // This is required for proper codegen. + for (auto *Clause : Clauses) { + if (isOpenMPPrivate(Clause->getClauseKind()) || + Clause->getClauseKind() == OMPC_copyprivate) { + // Mark all variables in private list clauses as used in inner region. + for (auto *VarRef : Clause->children()) { + if (auto *E = cast_or_null<Expr>(VarRef)) { + MarkDeclarationsReferencedInExpr(E); + } + } + } else if (isParallelOrTaskRegion(DSAStack->getCurrentDirective()) && + Clause->getClauseKind() == OMPC_schedule) { + // Mark all variables in private list clauses as used in inner region. + // Required for proper codegen of combined directives. + // TODO: add processing for other clauses. + if (auto *E = cast_or_null<Expr>( + cast<OMPScheduleClause>(Clause)->getHelperChunkSize())) { + MarkDeclarationsReferencedInExpr(E); + } + } + } + return ActOnCapturedRegionEnd(S.get()); +} + static bool CheckNestingOfRegions(Sema &SemaRef, DSAStackTy *Stack, OpenMPDirectiveKind CurrentRegion, const DeclarationNameInfo &CurrentName, @@ -1914,7 +2042,7 @@ public: TestIsStrictOp(false), SubtractStep(false) {} /// \brief Check init-expr for canonical loop form and save loop counter /// variable - #Var and its initialization value - #LB. 
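Stepping back to the captured-region setup above: the .global_tid., .part_id., .privates. and .copy_fn. parameters are what the OpenMP runtime hands to a task entry point, and the forceinline marking reflects that the captured statement is emitted into the task function rather than called through the generic outlined-function path. A task whose firstprivate value travels through that privates block, as a sketch (assuming -fopenmp):

    // task.cpp (illustrative) -- clang++ -fopenmp task.cpp
    #include <cstdio>

    int main() {
      int Payload = 42;
    #pragma omp parallel
    #pragma omp single
      {
    #pragma omp task firstprivate(Payload)
        std::printf("task sees %d\n", Payload);  // reads the copy made at task creation
      }
      return 0;
    }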
- bool CheckInit(Stmt *S); + bool CheckInit(Stmt *S, bool EmitDiags = true); /// \brief Check test-expr for canonical form, save upper-bound (#UB), flags /// for less/greater and for strict/non-strict comparison. bool CheckCond(Expr *S); @@ -1935,6 +2063,8 @@ public: bool ShouldSubtractStep() const { return SubtractStep; } /// \brief Build the expression to calculate the number of iterations. Expr *BuildNumIterations(Scope *S, const bool LimitedType) const; + /// \brief Build the precondition expression for the loops. + Expr *BuildPreCond(Scope *S, Expr *Cond) const; /// \brief Build reference expression to the counter to be used for codegen. Expr *BuildCounterVar() const; /// \brief Build initialization of the counter to be used for codegen. @@ -2052,7 +2182,7 @@ bool OpenMPIterationSpaceChecker::SetStep(Expr *NewStep, bool Subtract) { return false; } -bool OpenMPIterationSpaceChecker::CheckInit(Stmt *S) { +bool OpenMPIterationSpaceChecker::CheckInit(Stmt *S, bool EmitDiags) { // Check init-expr for canonical loop form and save loop counter // variable - #Var and its initialization value - #LB. // OpenMP [2.6] Canonical loop form. init-expr may be one of the following: @@ -2062,7 +2192,9 @@ bool OpenMPIterationSpaceChecker::CheckInit(Stmt *S) { // pointer-type var = lb // if (!S) { - SemaRef.Diag(DefaultLoc, diag::err_omp_loop_not_canonical_init); + if (EmitDiags) { + SemaRef.Diag(DefaultLoc, diag::err_omp_loop_not_canonical_init); + } return true; } InitSrcRange = S->getSourceRange(); @@ -2078,7 +2210,7 @@ bool OpenMPIterationSpaceChecker::CheckInit(Stmt *S) { if (auto Var = dyn_cast_or_null<VarDecl>(DS->getSingleDecl())) { if (Var->hasInit()) { // Accept non-canonical init form here but emit ext. warning. - if (Var->getInitStyle() != VarDecl::CInit) + if (Var->getInitStyle() != VarDecl::CInit && EmitDiags) SemaRef.Diag(S->getLocStart(), diag::ext_omp_loop_not_canonical_init) << S->getSourceRange(); @@ -2092,8 +2224,10 @@ bool OpenMPIterationSpaceChecker::CheckInit(Stmt *S) { return SetVarAndLB(dyn_cast<VarDecl>(DRE->getDecl()), DRE, CE->getArg(1)); - SemaRef.Diag(S->getLocStart(), diag::err_omp_loop_not_canonical_init) - << S->getSourceRange(); + if (EmitDiags) { + SemaRef.Diag(S->getLocStart(), diag::err_omp_loop_not_canonical_init) + << S->getSourceRange(); + } return true; } @@ -2338,11 +2472,22 @@ OpenMPIterationSpaceChecker::BuildNumIterations(Scope *S, return Diff.get(); } +Expr *OpenMPIterationSpaceChecker::BuildPreCond(Scope *S, Expr *Cond) const { + // Try to build LB <op> UB, where <op> is <, >, <=, or >=. + bool Suppress = SemaRef.getDiagnostics().getSuppressAllDiagnostics(); + SemaRef.getDiagnostics().setSuppressAllDiagnostics(/*Val=*/true); + auto CondExpr = SemaRef.BuildBinOp( + S, DefaultLoc, TestIsLessOp ? (TestIsStrictOp ? BO_LT : BO_LE) + : (TestIsStrictOp ? BO_GT : BO_GE), + LB, UB); + SemaRef.getDiagnostics().setSuppressAllDiagnostics(Suppress); + // Otherwise use the original loop condition and evaluate it at runtime. + return CondExpr.isUsable() ? CondExpr.get() : Cond; +} + /// \brief Build reference expression to the counter to be used for codegen. Expr *OpenMPIterationSpaceChecker::BuildCounterVar() const { - return DeclRefExpr::Create(SemaRef.Context, NestedNameSpecifierLoc(), - GetIncrementSrcRange().getBegin(), Var, false, - DefaultLoc, Var->getType(), VK_LValue); + return buildDeclRefExpr(SemaRef, Var, Var->getType(), DefaultLoc); } /// \brief Build initialization of the counter to be used for codegen.
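BuildPreCond above rebuilds the loop's own test as LB <op> UB so that codegen can skip a loop nest that runs zero iterations, and CheckInit enforces the canonical init-expr forms listed in its comment. For reference, a loop in canonical form next to one that is rejected (illustrative, assuming -fopenmp; the second loop is deliberately ill-formed):

    // canonical.cpp (illustrative) -- clang++ -fopenmp -fsyntax-only canonical.cpp
    void good(int N) {
    #pragma omp for
      for (int I = 0; I < N; ++I) { }     // canonical: var = lb; var < ub; ++var
    }

    void bad(int N) {
      int I, J;
    #pragma omp for
      for (I = 0, J = N; I < J; ++I) { }  // error: init clause is not in canonical form
    }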
@@ -2353,6 +2498,8 @@ Expr *OpenMPIterationSpaceChecker::BuildCounterStep() const { return Step; } /// \brief Iteration space of a single for loop. struct LoopIterationSpace { + /// \brief Condition of the loop. + Expr *PreCond; /// \brief This expression calculates the number of iterations in the loop. /// It is always possible to calculate it before starting the loop. Expr *NumIterations; @@ -2375,6 +2522,20 @@ struct LoopIterationSpace { } // namespace +void Sema::ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init) { + assert(getLangOpts().OpenMP && "OpenMP is not active."); + assert(Init && "Expected loop in canonical form."); + unsigned CollapseIteration = DSAStack->getCollapseNumber(); + if (CollapseIteration > 0 && + isOpenMPLoopDirective(DSAStack->getCurrentDirective())) { + OpenMPIterationSpaceChecker ISC(*this, ForLoc); + if (!ISC.CheckInit(Init, /*EmitDiags=*/false)) { + DSAStack->addLoopControlVariable(ISC.GetLoopVar()); + } + DSAStack->setCollapseNumber(CollapseIteration - 1); + } +} + /// \brief Called on a for stmt to check and extract its iteration space /// for further processing (such as collapsing). static bool CheckOpenMPIterationSpace( @@ -2453,32 +2614,27 @@ static bool CheckOpenMPIterationSpace( ? ((NestedLoopCount == 1) ? OMPC_linear : OMPC_lastprivate) : OMPC_private; if (((isOpenMPSimdDirective(DKind) && DVar.CKind != OMPC_unknown && - DVar.CKind != PredeterminedCKind) || + DVar.CKind != OMPC_threadprivate && DVar.CKind != PredeterminedCKind) || (isOpenMPWorksharingDirective(DKind) && !isOpenMPSimdDirective(DKind) && DVar.CKind != OMPC_unknown && DVar.CKind != OMPC_private && - DVar.CKind != OMPC_lastprivate)) && - (DVar.CKind != OMPC_private || DVar.RefExpr != nullptr)) { + DVar.CKind != OMPC_lastprivate && DVar.CKind != OMPC_threadprivate)) && + ((DVar.CKind != OMPC_private && DVar.CKind != OMPC_threadprivate) || + DVar.RefExpr != nullptr)) { SemaRef.Diag(Init->getLocStart(), diag::err_omp_loop_var_dsa) << getOpenMPClauseName(DVar.CKind) << getOpenMPDirectiveName(DKind) << getOpenMPClauseName(PredeterminedCKind); - ReportOriginalDSA(SemaRef, &DSA, Var, DVar, true); + if (DVar.RefExpr == nullptr) + DVar.CKind = PredeterminedCKind; + ReportOriginalDSA(SemaRef, &DSA, Var, DVar, /*IsLoopIterVar=*/true); HasErrors = true; } else if (LoopVarRefExpr != nullptr) { // Make the loop iteration variable private (for worksharing constructs), // linear (for simd directives with the only one associated loop) or // lastprivate (for simd directives with several collapsed loops). - // FIXME: the next check and error message must be removed once the - // capturing of global variables in loops is fixed. if (DVar.CKind == OMPC_unknown) DVar = DSA.hasDSA(Var, isOpenMPPrivate, MatchesAlways(), /*FromParent=*/false); - if (!Var->hasLocalStorage() && DVar.CKind == OMPC_unknown) { - SemaRef.Diag(Init->getLocStart(), diag::err_omp_global_loop_var_dsa) - << getOpenMPClauseName(PredeterminedCKind) - << getOpenMPDirectiveName(DKind); - HasErrors = true; - } else - DSA.addDSA(Var, LoopVarRefExpr, PredeterminedCKind); + DSA.addDSA(Var, LoopVarRefExpr, PredeterminedCKind); } assert(isOpenMPLoopDirective(DKind) && "DSA for non-loop vars"); @@ -2493,6 +2649,7 @@ static bool CheckOpenMPIterationSpace( return HasErrors; // Build the loop's iteration space representation. 
+ ResultIterSpace.PreCond = ISC.BuildPreCond(DSA.getCurScope(), For->getCond()); ResultIterSpace.NumIterations = ISC.BuildNumIterations( DSA.getCurScope(), /* LimitedType */ isOpenMPWorksharingDirective(DKind)); ResultIterSpace.CounterVar = ISC.BuildCounterVar(); @@ -2503,7 +2660,8 @@ static bool CheckOpenMPIterationSpace( ResultIterSpace.IncSrcRange = ISC.GetIncrementSrcRange(); ResultIterSpace.Subtract = ISC.ShouldSubtractStep(); - HasErrors |= (ResultIterSpace.NumIterations == nullptr || + HasErrors |= (ResultIterSpace.PreCond == nullptr || + ResultIterSpace.NumIterations == nullptr || ResultIterSpace.CounterVar == nullptr || ResultIterSpace.CounterInit == nullptr || ResultIterSpace.CounterStep == nullptr); @@ -2511,18 +2669,6 @@ static bool CheckOpenMPIterationSpace( return HasErrors; } -/// \brief Build a variable declaration for OpenMP loop iteration variable. -static VarDecl *BuildVarDecl(Sema &SemaRef, SourceLocation Loc, QualType Type, - StringRef Name) { - DeclContext *DC = SemaRef.CurContext; - IdentifierInfo *II = &SemaRef.PP.getIdentifierTable().get(Name); - TypeSourceInfo *TInfo = SemaRef.Context.getTrivialTypeSourceInfo(Type, Loc); - VarDecl *Decl = - VarDecl::Create(SemaRef.Context, DC, Loc, Loc, II, Type, TInfo, SC_None); - Decl->setImplicit(); - return Decl; -} - /// \brief Build 'VarRef = Start + Iter * Step'. static ExprResult BuildCounterUpdate(Sema &SemaRef, Scope *S, SourceLocation Loc, ExprResult VarRef, @@ -2648,6 +2794,9 @@ CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *NestedLoopCountExpr, // Last iteration number is (I1 * I2 * ... In) - 1, where I1, I2 ... In are // the iteration counts of the collapsed for loops. + // Precondition tests if there is at least one iteration (all conditions are + // true). + auto PreCond = ExprResult(IterSpaces[0].PreCond); auto N0 = IterSpaces[0].NumIterations; ExprResult LastIteration32 = WidenIterationCount(32 /* Bits */, N0, SemaRef); ExprResult LastIteration64 = WidenIterationCount(64 /* Bits */, N0, SemaRef); @@ -2660,6 +2809,10 @@ CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *NestedLoopCountExpr, Scope *CurScope = DSA.getCurScope(); for (unsigned Cnt = 1; Cnt < NestedLoopCount; ++Cnt) { + if (PreCond.isUsable()) { + PreCond = SemaRef.BuildBinOp(CurScope, SourceLocation(), BO_LAnd, + PreCond.get(), IterSpaces[Cnt].PreCond); + } auto N = IterSpaces[Cnt].NumIterations; AllCountsNeedLessThan32Bits &= C.getTypeSize(N->getType()) < 32; if (LastIteration32.isUsable()) @@ -2703,10 +2856,10 @@ CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *NestedLoopCountExpr, if (!IsConstant) { SourceLocation SaveLoc; VarDecl *SaveVar = - BuildVarDecl(SemaRef, SaveLoc, LastIteration.get()->getType(), + buildVarDecl(SemaRef, SaveLoc, LastIteration.get()->getType(), ".omp.last.iteration"); - ExprResult SaveRef = SemaRef.BuildDeclRefExpr( - SaveVar, LastIteration.get()->getType(), VK_LValue, SaveLoc); + ExprResult SaveRef = buildDeclRefExpr( + SemaRef, SaveVar, LastIteration.get()->getType(), SaveLoc); CalcLastIteration = SemaRef.BuildBinOp(CurScope, SaveLoc, BO_Assign, SaveRef.get(), LastIteration.get()); LastIteration = SaveRef; @@ -2721,25 +2874,20 @@ CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *NestedLoopCountExpr, SourceLocation InitLoc = IterSpaces[0].InitSrcRange.getBegin(); - // Precondition tests if there is at least one iteration (LastIteration > 0). 
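To make the collapsed-loop arithmetic concrete (a sketch; I1 and I2 are the trip counts of the two loops, the helper is illustrative): codegen drives a single variable iv over [0, I1*I2), LastIteration is I1*I2 - 1, and BuildCounterUpdate reconstructs each source counter as 'Var = Start + Iter * Step':

static void reconstructCounters(long iv, long I2, long &i, long &j) {
  i = 0 + (iv / I2) * 1; // outer counter: Start + Iter * Step
  j = 0 + (iv % I2) * 1; // inner counter, with Start = 0 and Step = 1 assumed
}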
- ExprResult PreCond = SemaRef.BuildBinOp( - CurScope, InitLoc, BO_GT, LastIteration.get(), - SemaRef.ActOnIntegerConstant(SourceLocation(), 0).get()); - QualType VType = LastIteration.get()->getType(); // Build variables passed into the runtime, necessary for worksharing directives. ExprResult LB, UB, IL, ST, EUB; if (isOpenMPWorksharingDirective(DKind)) { // Lower bound variable, initialized with zero. - VarDecl *LBDecl = BuildVarDecl(SemaRef, InitLoc, VType, ".omp.lb"); - LB = SemaRef.BuildDeclRefExpr(LBDecl, VType, VK_LValue, InitLoc); + VarDecl *LBDecl = buildVarDecl(SemaRef, InitLoc, VType, ".omp.lb"); + LB = buildDeclRefExpr(SemaRef, LBDecl, VType, InitLoc); SemaRef.AddInitializerToDecl( LBDecl, SemaRef.ActOnIntegerConstant(InitLoc, 0).get(), /*DirectInit*/ false, /*TypeMayContainAuto*/ false); // Upper bound variable, initialized with last iteration number. - VarDecl *UBDecl = BuildVarDecl(SemaRef, InitLoc, VType, ".omp.ub"); - UB = SemaRef.BuildDeclRefExpr(UBDecl, VType, VK_LValue, InitLoc); + VarDecl *UBDecl = buildVarDecl(SemaRef, InitLoc, VType, ".omp.ub"); + UB = buildDeclRefExpr(SemaRef, UBDecl, VType, InitLoc); SemaRef.AddInitializerToDecl(UBDecl, LastIteration.get(), /*DirectInit*/ false, /*TypeMayContainAuto*/ false); @@ -2747,15 +2895,15 @@ CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *NestedLoopCountExpr, // A 32-bit variable-flag where runtime returns 1 for the last iteration. // This will be used to implement clause 'lastprivate'. QualType Int32Ty = SemaRef.Context.getIntTypeForBitwidth(32, true); - VarDecl *ILDecl = BuildVarDecl(SemaRef, InitLoc, Int32Ty, ".omp.is_last"); - IL = SemaRef.BuildDeclRefExpr(ILDecl, Int32Ty, VK_LValue, InitLoc); + VarDecl *ILDecl = buildVarDecl(SemaRef, InitLoc, Int32Ty, ".omp.is_last"); + IL = buildDeclRefExpr(SemaRef, ILDecl, Int32Ty, InitLoc); SemaRef.AddInitializerToDecl( ILDecl, SemaRef.ActOnIntegerConstant(InitLoc, 0).get(), /*DirectInit*/ false, /*TypeMayContainAuto*/ false); // Stride variable returned by runtime (we initialize it to 1 by default). - VarDecl *STDecl = BuildVarDecl(SemaRef, InitLoc, VType, ".omp.stride"); - ST = SemaRef.BuildDeclRefExpr(STDecl, VType, VK_LValue, InitLoc); + VarDecl *STDecl = buildVarDecl(SemaRef, InitLoc, VType, ".omp.stride"); + ST = buildDeclRefExpr(SemaRef, STDecl, VType, InitLoc); SemaRef.AddInitializerToDecl( STDecl, SemaRef.ActOnIntegerConstant(InitLoc, 1).get(), /*DirectInit*/ false, /*TypeMayContainAuto*/ false); @@ -2775,8 +2923,8 @@ CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *NestedLoopCountExpr, ExprResult IV; ExprResult Init; { - VarDecl *IVDecl = BuildVarDecl(SemaRef, InitLoc, VType, ".omp.iv"); - IV = SemaRef.BuildDeclRefExpr(IVDecl, VType, VK_LValue, InitLoc); + VarDecl *IVDecl = buildVarDecl(SemaRef, InitLoc, VType, ".omp.iv"); + IV = buildDeclRefExpr(SemaRef, IVDecl, VType, InitLoc); Expr *RHS = isOpenMPWorksharingDirective(DKind) ?
LB.get() : SemaRef.ActOnIntegerConstant(SourceLocation(), 0).get(); @@ -2864,9 +3012,13 @@ CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *NestedLoopCountExpr, break; } - // Build update: IS.CounterVar = IS.Start + Iter * IS.Step + // Build update: IS.CounterVar(Private) = IS.Start + Iter * IS.Step + auto *CounterVar = buildDeclRefExpr( + SemaRef, cast<VarDecl>(cast<DeclRefExpr>(IS.CounterVar)->getDecl()), + IS.CounterVar->getType(), IS.CounterVar->getExprLoc(), + /*RefersToCapture=*/true); ExprResult Update = - BuildCounterUpdate(SemaRef, CurScope, UpdLoc, IS.CounterVar, + BuildCounterUpdate(SemaRef, CurScope, UpdLoc, CounterVar, IS.CounterInit, Iter, IS.CounterStep, IS.Subtract); if (!Update.isUsable()) { HasErrors = true; @@ -2875,7 +3027,7 @@ CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *NestedLoopCountExpr, // Build final: IS.CounterVar = IS.Start + IS.NumIters * IS.Step ExprResult Final = BuildCounterUpdate( - SemaRef, CurScope, UpdLoc, IS.CounterVar, IS.CounterInit, + SemaRef, CurScope, UpdLoc, CounterVar, IS.CounterInit, IS.NumIterations, IS.CounterStep, IS.Subtract); if (!Final.isUsable()) { HasErrors = true; @@ -2915,6 +3067,7 @@ CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *NestedLoopCountExpr, // Save results Built.IterationVarRef = IV.get(); Built.LastIteration = LastIteration.get(); + Built.NumIterations = NumIterations.get(); Built.CalcLastIteration = CalcLastIteration.get(); Built.PreCond = PreCond.get(); Built.Cond = Cond.get(); @@ -2933,11 +3086,11 @@ CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *NestedLoopCountExpr, } static Expr *GetCollapseNumberExpr(ArrayRef<OMPClause *> Clauses) { - auto CollapseFilter = [](const OMPClause *C) -> bool { + auto &&CollapseFilter = [](const OMPClause *C) -> bool { return C->getClauseKind() == OMPC_collapse; }; OMPExecutableDirective::filtered_clause_iterator<decltype(CollapseFilter)> I( - Clauses, CollapseFilter); + Clauses, std::move(CollapseFilter)); if (I) return cast<OMPCollapseClause>(*I)->getNumForLoops(); return nullptr; @@ -2958,6 +3111,16 @@ StmtResult Sema::ActOnOpenMPSimdDirective( assert((CurContext->isDependentContext() || B.builtAll()) && "omp simd loop exprs were not built"); + if (!CurContext->isDependentContext()) { + // Finalize the clauses that need pre-built expressions for CodeGen. + for (auto C : Clauses) { + if (auto LC = dyn_cast<OMPLinearClause>(C)) + if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef), + B.NumIterations, *this, CurScope)) + return StmtError(); + } + } + getCurFunction()->setHasBranchProtectedScope(); return OMPSimdDirective::Create(Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B); @@ -3055,6 +3218,23 @@ StmtResult Sema::ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses, getCurFunction()->setHasBranchProtectedScope(); + // OpenMP [2.7.3, single Construct, Restrictions] + // The copyprivate clause must not be used with the nowait clause. 
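For example (a minimal sketch; the function and values are illustrative), the check that follows accepts the construct below but would reject it with err_omp_single_copyprivate_with_nowait, plus a note at the nowait clause, if 'nowait' were added:

static void singleExample() {
  int res = 0;
#pragma omp parallel firstprivate(res)
  {
#pragma omp single copyprivate(res) // OK; 'copyprivate(res) nowait' is now rejected
    res = 42;
    // after the construct, every thread's private 'res' holds 42
  }
}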
+ OMPClause *Nowait = nullptr; + OMPClause *Copyprivate = nullptr; + for (auto *Clause : Clauses) { + if (Clause->getClauseKind() == OMPC_nowait) + Nowait = Clause; + else if (Clause->getClauseKind() == OMPC_copyprivate) + Copyprivate = Clause; + if (Copyprivate && Nowait) { + Diag(Copyprivate->getLocStart(), + diag::err_omp_single_copyprivate_with_nowait); + Diag(Nowait->getLocStart(), diag::note_omp_nowait_clause_here); + return StmtError(); + } + } + return OMPSingleDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt); } @@ -3219,6 +3399,246 @@ StmtResult Sema::ActOnOpenMPOrderedDirective(Stmt *AStmt, return OMPOrderedDirective::Create(Context, StartLoc, EndLoc, AStmt); } +namespace { +/// \brief Helper class for checking expressions in 'omp atomic [update]' +/// constructs. +class OpenMPAtomicUpdateChecker { + /// \brief Error results for atomic update expressions. + enum ExprAnalysisErrorCode { + /// \brief A statement is not an expression statement. + NotAnExpression, + /// \brief Expression is not a builtin binary or unary operation. + NotABinaryOrUnaryExpression, + /// \brief Unary operation is not a post-/pre- increment/decrement operation. + NotAnUnaryIncDecExpression, + /// \brief An expression is not of scalar type. + NotAScalarType, + /// \brief A binary operation is not an assignment operation. + NotAnAssignmentOp, + /// \brief RHS part of the binary operation is not a binary expression. + NotABinaryExpression, + /// \brief RHS part is not an additive/multiplicative/shift/bitwise binary + /// expression. + NotABinaryOperator, + /// \brief RHS binary operation does not have a reference to the updated LHS + /// part. + NotAnUpdateExpression, + /// \brief No errors found. + NoError + }; + /// \brief Reference to Sema. + Sema &SemaRef; + /// \brief A location for note diagnostics (when an error is found). + SourceLocation NoteLoc; + /// \brief 'x' lvalue part of the source atomic expression. + Expr *X; + /// \brief 'expr' rvalue part of the source atomic expression. + Expr *E; + /// \brief Helper expression of the form + /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or + /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'. + Expr *UpdateExpr; + /// \brief True if 'x' is the LHS in the RHS part of the full update + /// expression; this matters for non-associative operations. + bool IsXLHSInRHSPart; + BinaryOperatorKind Op; + SourceLocation OpLoc; + /// \brief true if the source expression is a postfix unary operation, false + /// if it is a prefix unary operation. + bool IsPostfixUpdate; + +public: + OpenMPAtomicUpdateChecker(Sema &SemaRef) + : SemaRef(SemaRef), X(nullptr), E(nullptr), UpdateExpr(nullptr), + IsXLHSInRHSPart(false), Op(BO_PtrMemD), IsPostfixUpdate(false) {} + /// \brief Check that the specified statement is suitable for 'atomic update' + /// constructs and extract 'x', 'expr' and the operation from the original + /// expression. If DiagId and NoteId == 0, only the check is performed, + /// without emitting diagnostics. + /// \param DiagId Diagnostic which should be emitted if an error is found. + /// \param NoteId Diagnostic note for the main error message. + /// \return true if the statement is not an update expression, false otherwise. + bool checkStatement(Stmt *S, unsigned DiagId = 0, unsigned NoteId = 0); + /// \brief Return the 'x' lvalue part of the source atomic expression. + Expr *getX() const { return X; } + /// \brief Return the 'expr' rvalue part of the source atomic expression.
+ Expr *getExpr() const { return E; } + /// \brief Return the update expression used in calculation of the updated + /// value. Always has form 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or + /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'. + Expr *getUpdateExpr() const { return UpdateExpr; } + /// \brief Return true if 'x' is LHS in RHS part of full update expression, + /// false otherwise. + bool isXLHSInRHSPart() const { return IsXLHSInRHSPart; } + + /// \brief true if the source expression is a postfix unary operation, false + /// if it is a prefix unary operation. + bool isPostfixUpdate() const { return IsPostfixUpdate; } + +private: + bool checkBinaryOperation(BinaryOperator *AtomicBinOp, unsigned DiagId = 0, + unsigned NoteId = 0); +}; +} // namespace + +bool OpenMPAtomicUpdateChecker::checkBinaryOperation( + BinaryOperator *AtomicBinOp, unsigned DiagId, unsigned NoteId) { + ExprAnalysisErrorCode ErrorFound = NoError; + SourceLocation ErrorLoc, NoteLoc; + SourceRange ErrorRange, NoteRange; + // Allowed constructs are: + // x = x binop expr; + // x = expr binop x; + if (AtomicBinOp->getOpcode() == BO_Assign) { + X = AtomicBinOp->getLHS(); + if (auto *AtomicInnerBinOp = dyn_cast<BinaryOperator>( + AtomicBinOp->getRHS()->IgnoreParenImpCasts())) { + if (AtomicInnerBinOp->isMultiplicativeOp() || + AtomicInnerBinOp->isAdditiveOp() || AtomicInnerBinOp->isShiftOp() || + AtomicInnerBinOp->isBitwiseOp()) { + Op = AtomicInnerBinOp->getOpcode(); + OpLoc = AtomicInnerBinOp->getOperatorLoc(); + auto *LHS = AtomicInnerBinOp->getLHS(); + auto *RHS = AtomicInnerBinOp->getRHS(); + llvm::FoldingSetNodeID XId, LHSId, RHSId; + X->IgnoreParenImpCasts()->Profile(XId, SemaRef.getASTContext(), + /*Canonical=*/true); + LHS->IgnoreParenImpCasts()->Profile(LHSId, SemaRef.getASTContext(), + /*Canonical=*/true); + RHS->IgnoreParenImpCasts()->Profile(RHSId, SemaRef.getASTContext(), + /*Canonical=*/true); + if (XId == LHSId) { + E = RHS; + IsXLHSInRHSPart = true; + } else if (XId == RHSId) { + E = LHS; + IsXLHSInRHSPart = false; + } else { + ErrorLoc = AtomicInnerBinOp->getExprLoc(); + ErrorRange = AtomicInnerBinOp->getSourceRange(); + NoteLoc = X->getExprLoc(); + NoteRange = X->getSourceRange(); + ErrorFound = NotAnUpdateExpression; + } + } else { + ErrorLoc = AtomicInnerBinOp->getExprLoc(); + ErrorRange = AtomicInnerBinOp->getSourceRange(); + NoteLoc = AtomicInnerBinOp->getOperatorLoc(); + NoteRange = SourceRange(NoteLoc, NoteLoc); + ErrorFound = NotABinaryOperator; + } + } else { + NoteLoc = ErrorLoc = AtomicBinOp->getRHS()->getExprLoc(); + NoteRange = ErrorRange = AtomicBinOp->getRHS()->getSourceRange(); + ErrorFound = NotABinaryExpression; + } + } else { + ErrorLoc = AtomicBinOp->getExprLoc(); + ErrorRange = AtomicBinOp->getSourceRange(); + NoteLoc = AtomicBinOp->getOperatorLoc(); + NoteRange = SourceRange(NoteLoc, NoteLoc); + ErrorFound = NotAnAssignmentOp; + } + if (ErrorFound != NoError && DiagId != 0 && NoteId != 0) { + SemaRef.Diag(ErrorLoc, DiagId) << ErrorRange; + SemaRef.Diag(NoteLoc, NoteId) << ErrorFound << NoteRange; + return true; + } else if (SemaRef.CurContext->isDependentContext()) + E = X = UpdateExpr = nullptr; + return ErrorFound != NoError; +} + +bool OpenMPAtomicUpdateChecker::checkStatement(Stmt *S, unsigned DiagId, + unsigned NoteId) { + ExprAnalysisErrorCode ErrorFound = NoError; + SourceLocation ErrorLoc, NoteLoc; + SourceRange ErrorRange, NoteRange; + // Allowed constructs are: + // x++; + // x--; + // ++x; + // --x; + // x binop= expr; + // x = x binop expr; + // x = expr 
binop x; + if (auto *AtomicBody = dyn_cast<Expr>(S)) { + AtomicBody = AtomicBody->IgnoreParenImpCasts(); + if (AtomicBody->getType()->isScalarType() || + AtomicBody->isInstantiationDependent()) { + if (auto *AtomicCompAssignOp = dyn_cast<CompoundAssignOperator>( + AtomicBody->IgnoreParenImpCasts())) { + // Check for Compound Assignment Operation + Op = BinaryOperator::getOpForCompoundAssignment( + AtomicCompAssignOp->getOpcode()); + OpLoc = AtomicCompAssignOp->getOperatorLoc(); + E = AtomicCompAssignOp->getRHS(); + X = AtomicCompAssignOp->getLHS(); + IsXLHSInRHSPart = true; + } else if (auto *AtomicBinOp = dyn_cast<BinaryOperator>( + AtomicBody->IgnoreParenImpCasts())) { + // Check for Binary Operation + if(checkBinaryOperation(AtomicBinOp, DiagId, NoteId)) + return true; + } else if (auto *AtomicUnaryOp = + dyn_cast<UnaryOperator>(AtomicBody->IgnoreParenImpCasts())) { + // Check for Unary Operation + if (AtomicUnaryOp->isIncrementDecrementOp()) { + IsPostfixUpdate = AtomicUnaryOp->isPostfix(); + Op = AtomicUnaryOp->isIncrementOp() ? BO_Add : BO_Sub; + OpLoc = AtomicUnaryOp->getOperatorLoc(); + X = AtomicUnaryOp->getSubExpr(); + E = SemaRef.ActOnIntegerConstant(OpLoc, /*uint64_t Val=*/1).get(); + IsXLHSInRHSPart = true; + } else { + ErrorFound = NotAnUnaryIncDecExpression; + ErrorLoc = AtomicUnaryOp->getExprLoc(); + ErrorRange = AtomicUnaryOp->getSourceRange(); + NoteLoc = AtomicUnaryOp->getOperatorLoc(); + NoteRange = SourceRange(NoteLoc, NoteLoc); + } + } else { + ErrorFound = NotABinaryOrUnaryExpression; + NoteLoc = ErrorLoc = AtomicBody->getExprLoc(); + NoteRange = ErrorRange = AtomicBody->getSourceRange(); + } + } else { + ErrorFound = NotAScalarType; + NoteLoc = ErrorLoc = AtomicBody->getLocStart(); + NoteRange = ErrorRange = SourceRange(NoteLoc, NoteLoc); + } + } else { + ErrorFound = NotAnExpression; + NoteLoc = ErrorLoc = S->getLocStart(); + NoteRange = ErrorRange = SourceRange(NoteLoc, NoteLoc); + } + if (ErrorFound != NoError && DiagId != 0 && NoteId != 0) { + SemaRef.Diag(ErrorLoc, DiagId) << ErrorRange; + SemaRef.Diag(NoteLoc, NoteId) << ErrorFound << NoteRange; + return true; + } else if (SemaRef.CurContext->isDependentContext()) + E = X = UpdateExpr = nullptr; + if (ErrorFound == NoError && E && X) { + // Build an update expression of form 'OpaqueValueExpr(x) binop + // OpaqueValueExpr(expr)' or 'OpaqueValueExpr(expr) binop + // OpaqueValueExpr(x)' and then cast it to the type of the 'x' expression. + auto *OVEX = new (SemaRef.getASTContext()) + OpaqueValueExpr(X->getExprLoc(), X->getType(), VK_RValue); + auto *OVEExpr = new (SemaRef.getASTContext()) + OpaqueValueExpr(E->getExprLoc(), E->getType(), VK_RValue); + auto Update = + SemaRef.CreateBuiltinBinOp(OpLoc, Op, IsXLHSInRHSPart ? OVEX : OVEExpr, + IsXLHSInRHSPart ? OVEExpr : OVEX); + if (Update.isInvalid()) + return true; + Update = SemaRef.PerformImplicitConversion(Update.get(), X->getType(), + Sema::AA_Casting); + if (Update.isInvalid()) + return true; + UpdateExpr = Update.get(); + } + return ErrorFound != NoError; +} + StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, @@ -3230,7 +3650,6 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses, // top and a single exit at the bottom. // The point of exit cannot be a branch out of the structured block. // longjmp() and throw() must not violate the entry/exit criteria. - // TODO further analysis of associated statements and clauses. 
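A minimal sketch of the statement shapes the new checker accepts (the function is illustrative; x and e are arbitrary scalars):

static void atomicUpdates(int x, int e) {
#pragma omp atomic update
  x += e;    // 'x binop= expr'
#pragma omp atomic
  x = x * e; // 'x = x binop expr' -> IsXLHSInRHSPart = true
#pragma omp atomic
  x = e + x; // 'x = expr binop x' -> IsXLHSInRHSPart = false
#pragma omp atomic
  ++x;       // unary inc/dec is rewritten internally as 'x = x + 1'
  // A relational RHS such as 'x = x < e;' is rejected with NotABinaryOperator.
}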
OpenMPClauseKind AtomicKind = OMPC_unknown; SourceLocation AtomicKindLoc; for (auto *C : Clauses) { @@ -3250,9 +3669,15 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses, } auto Body = CS->getCapturedStmt(); + if (auto *EWC = dyn_cast<ExprWithCleanups>(Body)) + Body = EWC->getSubExpr(); + Expr *X = nullptr; Expr *V = nullptr; Expr *E = nullptr; + Expr *UE = nullptr; + bool IsXLHSInRHSPart = false; + bool IsPostfixUpdate = false; // OpenMP [2.12.6, atomic Construct] // In the next expressions: // * x and v (as applicable) are both l-value expressions with scalar type. @@ -3275,14 +3700,14 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses, // expr or subexpressions of expr. // * For forms that allow multiple occurrences of x, the number of times // that x is evaluated is unspecified. - enum { - NotAnExpression, - NotAnAssignmentOp, - NotAScalarType, - NotAnLValue, - NoError - } ErrorFound = NoError; if (AtomicKind == OMPC_read) { + enum { + NotAnExpression, + NotAnAssignmentOp, + NotAScalarType, + NotAnLValue, + NoError + } ErrorFound = NoError; SourceLocation ErrorLoc, NoteLoc; SourceRange ErrorRange, NoteRange; // If clause is read: @@ -3338,6 +3763,13 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses, } else if (CurContext->isDependentContext()) V = X = nullptr; } else if (AtomicKind == OMPC_write) { + enum { + NotAnExpression, + NotAnAssignmentOp, + NotAScalarType, + NotAnLValue, + NoError + } ErrorFound = NoError; SourceLocation ErrorLoc, NoteLoc; SourceRange ErrorRange, NoteRange; // If clause is write: @@ -3346,8 +3778,8 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses, auto AtomicBinOp = dyn_cast<BinaryOperator>(AtomicBody->IgnoreParenImpCasts()); if (AtomicBinOp && AtomicBinOp->getOpcode() == BO_Assign) { - X = AtomicBinOp->getLHS()->IgnoreParenImpCasts(); - E = AtomicBinOp->getRHS()->IgnoreParenImpCasts(); + X = AtomicBinOp->getLHS(); + E = AtomicBinOp->getRHS(); if ((X->isInstantiationDependent() || X->getType()->isScalarType()) && (E->isInstantiationDependent() || E->getType()->isScalarType())) { if (!X->isLValue()) { @@ -3392,28 +3824,240 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses, } else if (CurContext->isDependentContext()) E = X = nullptr; } else if (AtomicKind == OMPC_update || AtomicKind == OMPC_unknown) { - if (!isa<Expr>(Body)) { - Diag(Body->getLocStart(), - diag::err_omp_atomic_update_not_expression_statement) - << (AtomicKind == OMPC_update); + // If clause is update: + // x++; + // x--; + // ++x; + // --x; + // x binop= expr; + // x = x binop expr; + // x = expr binop x; + OpenMPAtomicUpdateChecker Checker(*this); + if (Checker.checkStatement( + Body, (AtomicKind == OMPC_update) + ? 
diag::err_omp_atomic_update_not_expression_statement + : diag::err_omp_atomic_not_expression_statement, + diag::note_omp_atomic_update)) return StmtError(); + if (!CurContext->isDependentContext()) { + E = Checker.getExpr(); + X = Checker.getX(); + UE = Checker.getUpdateExpr(); + IsXLHSInRHSPart = Checker.isXLHSInRHSPart(); } } else if (AtomicKind == OMPC_capture) { - if (isa<Expr>(Body) && !isa<BinaryOperator>(Body)) { - Diag(Body->getLocStart(), - diag::err_omp_atomic_capture_not_expression_statement); - return StmtError(); - } else if (!isa<Expr>(Body) && !isa<CompoundStmt>(Body)) { - Diag(Body->getLocStart(), - diag::err_omp_atomic_capture_not_compound_statement); - return StmtError(); + enum { + NotAnAssignmentOp, + NotACompoundStatement, + NotTwoSubstatements, + NotASpecificExpression, + NoError + } ErrorFound = NoError; + SourceLocation ErrorLoc, NoteLoc; + SourceRange ErrorRange, NoteRange; + if (auto *AtomicBody = dyn_cast<Expr>(Body)) { + // If clause is a capture: + // v = x++; + // v = x--; + // v = ++x; + // v = --x; + // v = x binop= expr; + // v = x = x binop expr; + // v = x = expr binop x; + auto *AtomicBinOp = + dyn_cast<BinaryOperator>(AtomicBody->IgnoreParenImpCasts()); + if (AtomicBinOp && AtomicBinOp->getOpcode() == BO_Assign) { + V = AtomicBinOp->getLHS(); + Body = AtomicBinOp->getRHS()->IgnoreParenImpCasts(); + OpenMPAtomicUpdateChecker Checker(*this); + if (Checker.checkStatement( + Body, diag::err_omp_atomic_capture_not_expression_statement, + diag::note_omp_atomic_update)) + return StmtError(); + E = Checker.getExpr(); + X = Checker.getX(); + UE = Checker.getUpdateExpr(); + IsXLHSInRHSPart = Checker.isXLHSInRHSPart(); + IsPostfixUpdate = Checker.isPostfixUpdate(); + } else { + ErrorLoc = AtomicBody->getExprLoc(); + ErrorRange = AtomicBody->getSourceRange(); + NoteLoc = AtomicBinOp ? AtomicBinOp->getOperatorLoc() + : AtomicBody->getExprLoc(); + NoteRange = AtomicBinOp ? AtomicBinOp->getSourceRange() + : AtomicBody->getSourceRange(); + ErrorFound = NotAnAssignmentOp; + } + if (ErrorFound != NoError) { + Diag(ErrorLoc, diag::err_omp_atomic_capture_not_expression_statement) + << ErrorRange; + Diag(NoteLoc, diag::note_omp_atomic_capture) << ErrorFound << NoteRange; + return StmtError(); + } else if (CurContext->isDependentContext()) { + UE = V = E = X = nullptr; + } + } else { + // If clause is a capture: + // { v = x; x = expr; } + // { v = x; x++; } + // { v = x; x--; } + // { v = x; ++x; } + // { v = x; --x; } + // { v = x; x binop= expr; } + // { v = x; x = x binop expr; } + // { v = x; x = expr binop x; } + // { x++; v = x; } + // { x--; v = x; } + // { ++x; v = x; } + // { --x; v = x; } + // { x binop= expr; v = x; } + // { x = x binop expr; v = x; } + // { x = expr binop x; v = x; } + if (auto *CS = dyn_cast<CompoundStmt>(Body)) { + // Check that this is { expr1; expr2; } + if (CS->size() == 2) { + auto *First = CS->body_front(); + auto *Second = CS->body_back(); + if (auto *EWC = dyn_cast<ExprWithCleanups>(First)) + First = EWC->getSubExpr()->IgnoreParenImpCasts(); + if (auto *EWC = dyn_cast<ExprWithCleanups>(Second)) + Second = EWC->getSubExpr()->IgnoreParenImpCasts(); + // Need to find what subexpression is 'v' and what is 'x'. 
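To illustrate the two orderings being distinguished here (a sketch; v, x and e are arbitrary scalar lvalues): in '{ v = x; x += e; }' the second statement satisfies the update checker, so the first must match 'v = x' and the capture is postfix, with v receiving the old value of x; in '{ x += e; v = x; }' the roles swap and v receives the new value. The FoldingSetNodeID profiling below is what confirms that both occurrences of 'x' name the same l-value:

static void captures(int &v, int &x, int e) {
#pragma omp atomic capture
  { v = x; x += e; } // postfix: v receives the old value of x
#pragma omp atomic capture
  { x += e; v = x; } // prefix: v receives the new value of x
}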
+ OpenMPAtomicUpdateChecker Checker(*this); + bool IsUpdateExprFound = !Checker.checkStatement(Second); + BinaryOperator *BinOp = nullptr; + if (IsUpdateExprFound) { + BinOp = dyn_cast<BinaryOperator>(First); + IsUpdateExprFound = BinOp && BinOp->getOpcode() == BO_Assign; + } + if (IsUpdateExprFound && !CurContext->isDependentContext()) { + // { v = x; x++; } + // { v = x; x--; } + // { v = x; ++x; } + // { v = x; --x; } + // { v = x; x binop= expr; } + // { v = x; x = x binop expr; } + // { v = x; x = expr binop x; } + // Check that the first expression has form v = x. + auto *PossibleX = BinOp->getRHS()->IgnoreParenImpCasts(); + llvm::FoldingSetNodeID XId, PossibleXId; + Checker.getX()->Profile(XId, Context, /*Canonical=*/true); + PossibleX->Profile(PossibleXId, Context, /*Canonical=*/true); + IsUpdateExprFound = XId == PossibleXId; + if (IsUpdateExprFound) { + V = BinOp->getLHS(); + X = Checker.getX(); + E = Checker.getExpr(); + UE = Checker.getUpdateExpr(); + IsXLHSInRHSPart = Checker.isXLHSInRHSPart(); + IsPostfixUpdate = true; + } + } + if (!IsUpdateExprFound) { + IsUpdateExprFound = !Checker.checkStatement(First); + BinOp = nullptr; + if (IsUpdateExprFound) { + BinOp = dyn_cast<BinaryOperator>(Second); + IsUpdateExprFound = BinOp && BinOp->getOpcode() == BO_Assign; + } + if (IsUpdateExprFound && !CurContext->isDependentContext()) { + // { x++; v = x; } + // { x--; v = x; } + // { ++x; v = x; } + // { --x; v = x; } + // { x binop= expr; v = x; } + // { x = x binop expr; v = x; } + // { x = expr binop x; v = x; } + // Check that the second expression has form v = x. + auto *PossibleX = BinOp->getRHS()->IgnoreParenImpCasts(); + llvm::FoldingSetNodeID XId, PossibleXId; + Checker.getX()->Profile(XId, Context, /*Canonical=*/true); + PossibleX->Profile(PossibleXId, Context, /*Canonical=*/true); + IsUpdateExprFound = XId == PossibleXId; + if (IsUpdateExprFound) { + V = BinOp->getLHS(); + X = Checker.getX(); + E = Checker.getExpr(); + UE = Checker.getUpdateExpr(); + IsXLHSInRHSPart = Checker.isXLHSInRHSPart(); + IsPostfixUpdate = false; + } + } + } + if (!IsUpdateExprFound) { + // { v = x; x = expr; } + auto *FirstBinOp = dyn_cast<BinaryOperator>(First); + if (!FirstBinOp || FirstBinOp->getOpcode() != BO_Assign) { + ErrorFound = NotAnAssignmentOp; + NoteLoc = ErrorLoc = FirstBinOp ? FirstBinOp->getOperatorLoc() + : First->getLocStart(); + NoteRange = ErrorRange = FirstBinOp + ? FirstBinOp->getSourceRange() + : SourceRange(ErrorLoc, ErrorLoc); + } else { + auto *SecondBinOp = dyn_cast<BinaryOperator>(Second); + if (!SecondBinOp || SecondBinOp->getOpcode() != BO_Assign) { + ErrorFound = NotAnAssignmentOp; + NoteLoc = ErrorLoc = SecondBinOp ? SecondBinOp->getOperatorLoc() + : Second->getLocStart(); + NoteRange = ErrorRange = SecondBinOp + ? 
SecondBinOp->getSourceRange() + : SourceRange(ErrorLoc, ErrorLoc); + } else { + auto *PossibleXRHSInFirst = + FirstBinOp->getRHS()->IgnoreParenImpCasts(); + auto *PossibleXLHSInSecond = + SecondBinOp->getLHS()->IgnoreParenImpCasts(); + llvm::FoldingSetNodeID X1Id, X2Id; + PossibleXRHSInFirst->Profile(X1Id, Context, /*Canonical=*/true); + PossibleXLHSInSecond->Profile(X2Id, Context, + /*Canonical=*/true); + IsUpdateExprFound = X1Id == X2Id; + if (IsUpdateExprFound) { + V = FirstBinOp->getLHS(); + X = SecondBinOp->getLHS(); + E = SecondBinOp->getRHS(); + UE = nullptr; + IsXLHSInRHSPart = false; + IsPostfixUpdate = true; + } else { + ErrorFound = NotASpecificExpression; + ErrorLoc = FirstBinOp->getExprLoc(); + ErrorRange = FirstBinOp->getSourceRange(); + NoteLoc = SecondBinOp->getLHS()->getExprLoc(); + NoteRange = SecondBinOp->getRHS()->getSourceRange(); + } + } + } + } + } else { + NoteLoc = ErrorLoc = Body->getLocStart(); + NoteRange = ErrorRange = + SourceRange(Body->getLocStart(), Body->getLocStart()); + ErrorFound = NotTwoSubstatements; + } + } else { + NoteLoc = ErrorLoc = Body->getLocStart(); + NoteRange = ErrorRange = + SourceRange(Body->getLocStart(), Body->getLocStart()); + ErrorFound = NotACompoundStatement; + } + if (ErrorFound != NoError) { + Diag(ErrorLoc, diag::err_omp_atomic_capture_not_compound_statement) + << ErrorRange; + Diag(NoteLoc, diag::note_omp_atomic_capture) << ErrorFound << NoteRange; + return StmtError(); + } else if (CurContext->isDependentContext()) { + UE = V = E = X = nullptr; + } } } getCurFunction()->setHasBranchProtectedScope(); return OMPAtomicDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt, - X, V, E); + X, V, E, UE, IsXLHSInRHSPart, + IsPostfixUpdate); } StmtResult Sema::ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses, @@ -3655,6 +4299,9 @@ ExprResult Sema::VerifyPositiveIntegerConstantInClause(Expr *E, << E->getSourceRange(); return ExprError(); } + if (CKind == OMPC_collapse) { + DSAStack->setCollapseNumber(Result.getExtValue()); + } return ICE; } @@ -3882,6 +4529,7 @@ OMPClause *Sema::ActOnOpenMPScheduleClause( return nullptr; } Expr *ValExpr = ChunkSize; + Expr *HelperValExpr = nullptr; if (ChunkSize) { if (!ChunkSize->isValueDependent() && !ChunkSize->isTypeDependent() && !ChunkSize->isInstantiationDependent() && @@ -3898,17 +4546,25 @@ OMPClause *Sema::ActOnOpenMPScheduleClause( // chunk_size must be a loop invariant integer expression with a positive // value. 
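Concretely (an illustrative sketch; n, c and a are arbitrary), the code below accepts a positive constant chunk, would reject a non-positive constant one, and for a non-constant chunk on a parallel or task region builds a '.chunk.' helper variable that captures the value once:

static void schedules(int n, int c, double *a) {
#pragma omp parallel for schedule(static, 4) // constant and positive: accepted
  for (int i = 0; i < n; ++i)
    a[i] = 0.0;
  // schedule(static, 0) would be rejected as not strictly positive
#pragma omp parallel for schedule(dynamic, c) // non-constant: '.chunk.' helper
  for (int i = 0; i < n; ++i)
    a[i] += 1.0;
}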
llvm::APSInt Result; - if (ValExpr->isIntegerConstantExpr(Result, Context) && - Result.isSigned() && !Result.isStrictlyPositive()) { - Diag(ChunkSizeLoc, diag::err_omp_negative_expression_in_clause) - << "schedule" << ChunkSize->getSourceRange(); - return nullptr; + if (ValExpr->isIntegerConstantExpr(Result, Context)) { + if (Result.isSigned() && !Result.isStrictlyPositive()) { + Diag(ChunkSizeLoc, diag::err_omp_negative_expression_in_clause) + << "schedule" << ChunkSize->getSourceRange(); + return nullptr; + } + } else if (isParallelOrTaskRegion(DSAStack->getCurrentDirective())) { + auto *ImpVar = buildVarDecl(*this, ChunkSize->getExprLoc(), + ChunkSize->getType(), ".chunk."); + auto *ImpVarRef = buildDeclRefExpr(*this, ImpVar, ChunkSize->getType(), + ChunkSize->getExprLoc(), + /*RefersToCapture=*/true); + HelperValExpr = ImpVarRef; } } } return new (Context) OMPScheduleClause(StartLoc, LParenLoc, KindLoc, CommaLoc, - EndLoc, Kind, ValExpr); + EndLoc, Kind, ValExpr, HelperValExpr); } OMPClause *Sema::ActOnOpenMPClause(OpenMPClauseKind Kind, @@ -4133,14 +4789,6 @@ OMPClause *Sema::ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList, continue; } - // OpenMP [2.9.3.3, Restrictions, C/C++, p.1] - // A variable of class type (or array thereof) that appears in a private - // clause requires an accessible, unambiguous default constructor for the - // class type. - while (Type->isArrayType()) { - Type = cast<ArrayType>(Type.getTypePtr())->getElementType(); - } - // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced // in a Construct] // Variables with the predetermined data-sharing attributes may not be @@ -4156,25 +4804,36 @@ OMPClause *Sema::ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList, continue; } + // Variably modified types are not supported for tasks. + if (!Type->isAnyPointerType() && Type->isVariablyModifiedType() && + DSAStack->getCurrentDirective() == OMPD_task) { + Diag(ELoc, diag::err_omp_variably_modified_type_not_supported) + << getOpenMPClauseName(OMPC_private) << Type + << getOpenMPDirectiveName(DSAStack->getCurrentDirective()); + bool IsDecl = + VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly; + Diag(VD->getLocation(), + IsDecl ? diag::note_previous_decl : diag::note_defined_here) + << VD; + continue; + } + + // OpenMP [2.9.3.3, Restrictions, C/C++, p.1] + // A variable of class type (or array thereof) that appears in a private + // clause requires an accessible, unambiguous default constructor for the + // class type. // Generate helper private variable and initialize it with the default // value. The address of the original variable is replaced by the address of // the new private variable in CodeGen. This new variable is not added to // IdResolver, so the code in the OpenMP region uses original variable for // proper diagnostics. 
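A sketch of the two paths above (function and names illustrative): for an ordinary variable a fresh private VarDecl with the unqualified type is built and default-initialized, and CodeGen substitutes its address inside the region; for a variably modified type on a task the new check emits err_omp_variably_modified_type_not_supported instead:

static void privateExamples(int n) {
  int a = 0;
#pragma omp parallel private(a) // helper VarDecl for 'a' is default-initialized
  a = n;
  // int vla[n];
  // #pragma omp task private(vla) // now diagnosed: variably modified type
}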
- auto VDPrivate = - VarDecl::Create(Context, CurContext, DE->getLocStart(), - DE->getExprLoc(), VD->getIdentifier(), VD->getType(), - VD->getTypeSourceInfo(), /*S*/ SC_Auto); - ActOnUninitializedDecl(VDPrivate, /*TypeMayContainAuto*/ false); + Type = Type.getUnqualifiedType(); + auto VDPrivate = buildVarDecl(*this, DE->getExprLoc(), Type, VD->getName()); + ActOnUninitializedDecl(VDPrivate, /*TypeMayContainAuto=*/false); if (VDPrivate->isInvalidDecl()) continue; - CurContext->addDecl(VDPrivate); - auto VDPrivateRefExpr = - DeclRefExpr::Create(Context, /*QualifierLoc*/ NestedNameSpecifierLoc(), - /*TemplateKWLoc*/ SourceLocation(), VDPrivate, - /*RefersToEnclosingVariableOrCapture*/ false, - /*NameLoc*/ SourceLocation(), DE->getType(), - /*VK*/ VK_LValue); + auto VDPrivateRefExpr = buildDeclRefExpr( + *this, VDPrivate, DE->getType().getUnqualifiedType(), DE->getExprLoc()); DSAStack->addDSA(VD, DE, OMPC_private); Vars.push_back(DE); @@ -4285,14 +4944,12 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList, // A variable of class type (or array thereof) that appears in a private // clause requires an accessible, unambiguous copy constructor for the // class type. - Type = Context.getBaseElementType(Type); + auto ElemType = Context.getBaseElementType(Type).getNonReferenceType(); // If an implicit firstprivate variable found it was checked already. if (!IsImplicitClause) { DSAStackTy::DSAVarData DVar = DSAStack->getTopDSA(VD, false); - Type = Type.getNonReferenceType().getCanonicalType(); - bool IsConstant = Type.isConstant(Context); - Type = Context.getBaseElementType(Type); + bool IsConstant = ElemType.isConstant(Context); // OpenMP [2.4.13, Data-sharing Attribute Clauses] // A list item that specifies a given variable may not appear in more // than one clause on the same directive, except that a variable may be @@ -4375,10 +5032,22 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList, } } + // Variably modified types are not supported for tasks. + if (!Type->isAnyPointerType() && Type->isVariablyModifiedType() && + DSAStack->getCurrentDirective() == OMPD_task) { + Diag(ELoc, diag::err_omp_variably_modified_type_not_supported) + << getOpenMPClauseName(OMPC_firstprivate) << Type + << getOpenMPDirectiveName(DSAStack->getCurrentDirective()); + bool IsDecl = + VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly; + Diag(VD->getLocation(), + IsDecl ? diag::note_previous_decl : diag::note_defined_here) + << VD; + continue; + } + Type = Type.getUnqualifiedType(); - auto VDPrivate = VarDecl::Create(Context, CurContext, DE->getLocStart(), - ELoc, VD->getIdentifier(), VD->getType(), - VD->getTypeSourceInfo(), /*S*/ SC_Auto); + auto VDPrivate = buildVarDecl(*this, ELoc, Type, VD->getName()); // Generate helper private variable and initialize it with the value of the // original variable. The address of the original variable is replaced by // the address of the new private variable in the CodeGen. This new variable @@ -4387,19 +5056,16 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList, Expr *VDInitRefExpr = nullptr; // For arrays generate initializer for single element and replace it by the // original array element in CodeGen. 
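The effect of the array path just described, as a sketch (illustrative code): Sema builds a single-element copy once, via the '.firstprivate.temp' variable of the element type, and CodeGen replays that copy for every slot of the private array:

static void firstprivateArray(int n) {
  int arr[8] = {0};
#pragma omp parallel firstprivate(arr) // element copy built once, replayed per slot
  arr[0] = n;
}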
- if (DE->getType()->isArrayType()) { - auto VDInit = VarDecl::Create(Context, CurContext, DE->getLocStart(), - ELoc, VD->getIdentifier(), Type, - VD->getTypeSourceInfo(), /*S*/ SC_Auto); - CurContext->addHiddenDecl(VDInit); - VDInitRefExpr = DeclRefExpr::Create( - Context, /*QualifierLoc*/ NestedNameSpecifierLoc(), - /*TemplateKWLoc*/ SourceLocation(), VDInit, - /*RefersToEnclosingVariableOrCapture*/ true, ELoc, Type, - /*VK*/ VK_LValue); - VDInit->setIsUsed(); + if (Type->isArrayType()) { + auto VDInit = + buildVarDecl(*this, DE->getExprLoc(), ElemType, VD->getName()); + VDInitRefExpr = buildDeclRefExpr(*this, VDInit, ElemType, ELoc); auto Init = DefaultLvalueConversion(VDInitRefExpr).get(); - InitializedEntity Entity = InitializedEntity::InitializeVariable(VDInit); + ElemType = ElemType.getUnqualifiedType(); + auto *VDInitTemp = buildVarDecl(*this, DE->getLocStart(), ElemType, + ".firstprivate.temp"); + InitializedEntity Entity = + InitializedEntity::InitializeVariable(VDInitTemp); InitializationKind Kind = InitializationKind::CreateCopy(ELoc, ELoc); InitializationSequence InitSeq(*this, Entity, Kind, Init); @@ -4409,15 +5075,13 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList, else VDPrivate->setInit(Result.getAs<Expr>()); } else { - AddInitializerToDecl( - VDPrivate, - DefaultLvalueConversion( - DeclRefExpr::Create(Context, NestedNameSpecifierLoc(), - SourceLocation(), DE->getDecl(), - /*RefersToEnclosingVariableOrCapture=*/true, - DE->getExprLoc(), DE->getType(), - /*VK=*/VK_LValue)).get(), - /*DirectInit=*/false, /*TypeMayContainAuto=*/false); + auto *VDInit = + buildVarDecl(*this, DE->getLocStart(), Type, ".firstprivate.temp"); + VDInitRefExpr = + buildDeclRefExpr(*this, VDInit, DE->getType(), DE->getExprLoc()); + AddInitializerToDecl(VDPrivate, + DefaultLvalueConversion(VDInitRefExpr).get(), + /*DirectInit=*/false, /*TypeMayContainAuto=*/false); } if (VDPrivate->isInvalidDecl()) { if (IsImplicitClause) { @@ -4427,12 +5091,8 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList, continue; } CurContext->addDecl(VDPrivate); - auto VDPrivateRefExpr = - DeclRefExpr::Create(Context, /*QualifierLoc*/ NestedNameSpecifierLoc(), - /*TemplateKWLoc*/ SourceLocation(), VDPrivate, - /*RefersToEnclosingVariableOrCapture*/ false, - DE->getLocStart(), DE->getType(), - /*VK*/ VK_LValue); + auto VDPrivateRefExpr = buildDeclRefExpr( + *this, VDPrivate, DE->getType().getUnqualifiedType(), DE->getExprLoc()); DSAStack->addDSA(VD, DE, OMPC_firstprivate); Vars.push_back(DE); PrivateCopies.push_back(VDPrivateRefExpr); @@ -4451,11 +5111,17 @@ OMPClause *Sema::ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList, SourceLocation LParenLoc, SourceLocation EndLoc) { SmallVector<Expr *, 8> Vars; + SmallVector<Expr *, 8> SrcExprs; + SmallVector<Expr *, 8> DstExprs; + SmallVector<Expr *, 8> AssignmentOps; for (auto &RefExpr : VarList) { assert(RefExpr && "NULL expr in OpenMP lastprivate clause."); if (isa<DependentScopeDeclRefExpr>(RefExpr)) { // It will be analyzed later. Vars.push_back(RefExpr); + SrcExprs.push_back(nullptr); + DstExprs.push_back(nullptr); + AssignmentOps.push_back(nullptr); continue; } @@ -4477,6 +5143,9 @@ OMPClause *Sema::ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList, if (Type->isDependentType() || Type->isInstantiationDependentType()) { // It will be analyzed later. 
Vars.push_back(DE); + SrcExprs.push_back(nullptr); + DstExprs.push_back(nullptr); + AssignmentOps.push_back(nullptr); continue; } @@ -4521,6 +5190,7 @@ OMPClause *Sema::ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList, // lastprivate clause on a worksharing construct if any of the corresponding // worksharing regions ever binds to any of the corresponding parallel // regions. + DSAStackTy::DSAVarData TopDVar = DVar; if (isOpenMPWorksharingDirective(CurrDir) && !isOpenMPParallelDirective(CurrDir)) { DVar = DSAStack->getImplicitDSA(VD, true); @@ -4540,65 +5210,39 @@ OMPClause *Sema::ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList, // A variable of class type (or array thereof) that appears in a // lastprivate clause requires an accessible, unambiguous copy assignment // operator for the class type. - while (Type.getNonReferenceType()->isArrayType()) - Type = cast<ArrayType>(Type.getNonReferenceType().getTypePtr()) - ->getElementType(); - CXXRecordDecl *RD = getLangOpts().CPlusPlus - ? Type.getNonReferenceType()->getAsCXXRecordDecl() - : nullptr; - // FIXME This code must be replaced by actual copying and destructing of the - // lastprivate variable. - if (RD) { - CXXMethodDecl *MD = LookupCopyingAssignment(RD, 0, false, 0); - DeclAccessPair FoundDecl = DeclAccessPair::make(MD, MD->getAccess()); - if (MD) { - if (CheckMemberAccess(ELoc, RD, FoundDecl) == AR_inaccessible || - MD->isDeleted()) { - Diag(ELoc, diag::err_omp_required_method) - << getOpenMPClauseName(OMPC_lastprivate) << 2; - bool IsDecl = VD->isThisDeclarationADefinition(Context) == - VarDecl::DeclarationOnly; - Diag(VD->getLocation(), - IsDecl ? diag::note_previous_decl : diag::note_defined_here) - << VD; - Diag(RD->getLocation(), diag::note_previous_decl) << RD; - continue; - } - MarkFunctionReferenced(ELoc, MD); - DiagnoseUseOfDecl(MD, ELoc); - } - - CXXDestructorDecl *DD = RD->getDestructor(); - if (DD) { - PartialDiagnostic PD = - PartialDiagnostic(PartialDiagnostic::NullDiagnostic()); - if (CheckDestructorAccess(ELoc, DD, PD) == AR_inaccessible || - DD->isDeleted()) { - Diag(ELoc, diag::err_omp_required_method) - << getOpenMPClauseName(OMPC_lastprivate) << 4; - bool IsDecl = VD->isThisDeclarationADefinition(Context) == - VarDecl::DeclarationOnly; - Diag(VD->getLocation(), - IsDecl ? diag::note_previous_decl : diag::note_defined_here) - << VD; - Diag(RD->getLocation(), diag::note_previous_decl) << RD; - continue; - } - MarkFunctionReferenced(ELoc, DD); - DiagnoseUseOfDecl(DD, ELoc); - } - } + Type = Context.getBaseElementType(Type).getNonReferenceType(); + auto *SrcVD = buildVarDecl(*this, DE->getLocStart(), + Type.getUnqualifiedType(), ".lastprivate.src"); + auto *PseudoSrcExpr = buildDeclRefExpr( + *this, SrcVD, Type.getUnqualifiedType(), DE->getExprLoc()); + auto *DstVD = + buildVarDecl(*this, DE->getLocStart(), Type, ".lastprivate.dst"); + auto *PseudoDstExpr = + buildDeclRefExpr(*this, DstVD, Type, DE->getExprLoc()); + // For arrays generate assignment operation for single element and replace + // it by the original array element in CodeGen. 
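Roughly, for a construct like the sketch below the clause now carries the '.lastprivate.src' and '.lastprivate.dst' pseudo variables plus the prebuilt assignment '.lastprivate.dst = .lastprivate.src'; CodeGen emits that assignment from the thread executing the sequentially last iteration (the function is illustrative):

static int lastprivateExample(int n) {
  int last = -1;
#pragma omp parallel for lastprivate(last)
  for (int i = 0; i < n; ++i)
    last = i;
  return last; // n - 1 after the loop, for n > 0
}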
+ auto AssignmentOp = BuildBinOp(/*S=*/nullptr, DE->getExprLoc(), BO_Assign, + PseudoDstExpr, PseudoSrcExpr); + if (AssignmentOp.isInvalid()) + continue; + AssignmentOp = ActOnFinishFullExpr(AssignmentOp.get(), DE->getExprLoc(), + /*DiscardedValue=*/true); + if (AssignmentOp.isInvalid()) + continue; - if (DVar.CKind != OMPC_firstprivate) + if (TopDVar.CKind != OMPC_firstprivate) DSAStack->addDSA(VD, DE, OMPC_lastprivate); Vars.push_back(DE); + SrcExprs.push_back(PseudoSrcExpr); + DstExprs.push_back(PseudoDstExpr); + AssignmentOps.push_back(AssignmentOp.get()); } if (Vars.empty()) return nullptr; return OMPLastprivateClause::Create(Context, StartLoc, LParenLoc, EndLoc, - Vars); + Vars, SrcExprs, DstExprs, AssignmentOps); } OMPClause *Sema::ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList, @@ -4718,19 +5362,19 @@ OMPClause *Sema::ActOnOpenMPReductionClause( switch (OOK) { case OO_Plus: case OO_Minus: - BOK = BO_AddAssign; + BOK = BO_Add; break; case OO_Star: - BOK = BO_MulAssign; + BOK = BO_Mul; break; case OO_Amp: - BOK = BO_AndAssign; + BOK = BO_And; break; case OO_Pipe: - BOK = BO_OrAssign; + BOK = BO_Or; break; case OO_Caret: - BOK = BO_XorAssign; + BOK = BO_Xor; break; case OO_AmpAmp: BOK = BO_LAnd; @@ -4738,7 +5382,44 @@ OMPClause *Sema::ActOnOpenMPReductionClause( case OO_PipePipe: BOK = BO_LOr; break; - default: + case OO_New: + case OO_Delete: + case OO_Array_New: + case OO_Array_Delete: + case OO_Slash: + case OO_Percent: + case OO_Tilde: + case OO_Exclaim: + case OO_Equal: + case OO_Less: + case OO_Greater: + case OO_LessEqual: + case OO_GreaterEqual: + case OO_PlusEqual: + case OO_MinusEqual: + case OO_StarEqual: + case OO_SlashEqual: + case OO_PercentEqual: + case OO_CaretEqual: + case OO_AmpEqual: + case OO_PipeEqual: + case OO_LessLess: + case OO_GreaterGreater: + case OO_LessLessEqual: + case OO_GreaterGreaterEqual: + case OO_EqualEqual: + case OO_ExclaimEqual: + case OO_PlusPlus: + case OO_MinusMinus: + case OO_Comma: + case OO_ArrowStar: + case OO_Arrow: + case OO_Call: + case OO_Subscript: + case OO_Conditional: + case NUM_OVERLOADED_OPERATORS: + llvm_unreachable("Unexpected reduction identifier"); + case OO_None: if (auto II = DN.getAsIdentifierInfo()) { if (II->isStr("max")) BOK = BO_GT; @@ -4760,11 +5441,17 @@ OMPClause *Sema::ActOnOpenMPReductionClause( } SmallVector<Expr *, 8> Vars; + SmallVector<Expr *, 8> LHSs; + SmallVector<Expr *, 8> RHSs; + SmallVector<Expr *, 8> ReductionOps; for (auto RefExpr : VarList) { assert(RefExpr && "nullptr expr in OpenMP reduction clause."); if (isa<DependentScopeDeclRefExpr>(RefExpr)) { // It will be analyzed later. Vars.push_back(RefExpr); + LHSs.push_back(nullptr); + RHSs.push_back(nullptr); + ReductionOps.push_back(nullptr); continue; } @@ -4773,6 +5460,9 @@ OMPClause *Sema::ActOnOpenMPReductionClause( RefExpr->containsUnexpandedParameterPack()) { // It will be analyzed later. 
Vars.push_back(RefExpr); + LHSs.push_back(nullptr); + RHSs.push_back(nullptr); + ReductionOps.push_back(nullptr); continue; } @@ -4865,23 +5555,6 @@ OMPClause *Sema::ActOnOpenMPReductionClause( << VD; continue; } - bool Suppress = getDiagnostics().getSuppressAllDiagnostics(); - getDiagnostics().setSuppressAllDiagnostics(true); - ExprResult ReductionOp = - BuildBinOp(DSAStack->getCurScope(), ReductionId.getLocStart(), BOK, - RefExpr, RefExpr); - getDiagnostics().setSuppressAllDiagnostics(Suppress); - if (ReductionOp.isInvalid()) { - Diag(ELoc, diag::err_omp_reduction_id_not_compatible) << Type - << ReductionIdRange; - bool IsDecl = - VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly; - Diag(VD->getLocation(), - IsDecl ? diag::note_previous_decl : diag::note_defined_here) - << VD; - continue; - } - // OpenMP [2.14.1.1, Data-sharing Attribute Rules for Variables Referenced // in a Construct] // Variables with the predetermined data-sharing attributes may not be @@ -4924,55 +5597,160 @@ OMPClause *Sema::ActOnOpenMPReductionClause( continue; } } - - CXXRecordDecl *RD = getLangOpts().CPlusPlus - ? Type.getNonReferenceType()->getAsCXXRecordDecl() - : nullptr; - // FIXME This code must be replaced by actual constructing/destructing of - // the reduction variable. - if (RD) { - CXXConstructorDecl *CD = LookupDefaultConstructor(RD); - PartialDiagnostic PD = - PartialDiagnostic(PartialDiagnostic::NullDiagnostic()); - if (!CD || - CheckConstructorAccess(ELoc, CD, - InitializedEntity::InitializeTemporary(Type), - CD->getAccess(), PD) == AR_inaccessible || - CD->isDeleted()) { - Diag(ELoc, diag::err_omp_required_method) - << getOpenMPClauseName(OMPC_reduction) << 0; - bool IsDecl = VD->isThisDeclarationADefinition(Context) == - VarDecl::DeclarationOnly; - Diag(VD->getLocation(), - IsDecl ? diag::note_previous_decl : diag::note_defined_here) - << VD; - Diag(RD->getLocation(), diag::note_previous_decl) << RD; - continue; + Type = Type.getNonLValueExprType(Context).getUnqualifiedType(); + auto *LHSVD = buildVarDecl(*this, ELoc, Type, ".reduction.lhs"); + auto *RHSVD = buildVarDecl(*this, ELoc, Type, VD->getName()); + // Add initializer for private variable. + Expr *Init = nullptr; + switch (BOK) { + case BO_Add: + case BO_Xor: + case BO_Or: + case BO_LOr: + // '+', '-', '^', '|', '||' reduction ops - initializer is '0'. + if (Type->isScalarType() || Type->isAnyComplexType()) { + Init = ActOnIntegerConstant(ELoc, /*Val=*/0).get(); } - MarkFunctionReferenced(ELoc, CD); - DiagnoseUseOfDecl(CD, ELoc); - - CXXDestructorDecl *DD = RD->getDestructor(); - if (DD) { - if (CheckDestructorAccess(ELoc, DD, PD) == AR_inaccessible || - DD->isDeleted()) { - Diag(ELoc, diag::err_omp_required_method) - << getOpenMPClauseName(OMPC_reduction) << 4; - bool IsDecl = VD->isThisDeclarationADefinition(Context) == - VarDecl::DeclarationOnly; - Diag(VD->getLocation(), - IsDecl ? diag::note_previous_decl : diag::note_defined_here) - << VD; - Diag(RD->getLocation(), diag::note_previous_decl) << RD; - continue; + break; + case BO_Mul: + case BO_LAnd: + if (Type->isScalarType() || Type->isAnyComplexType()) { + // '*' and '&&' reduction ops - initializer is '1'. + Init = ActOnIntegerConstant(ELoc, /*Val=*/1).get(); + } + break; + case BO_And: { + // '&' reduction op - initializer is '~0'. 
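// Taken together, the initializers this switch selects for the private copy
// are (cf. OpenMP 4.0, 2.14.3.6):
//   +  -  ^  |  ||  -> 0        *  &&  -> 1        &  -> ~0
//   min -> largest representable value of the item type
//   max -> least representable value of the item type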
+ QualType OrigType = Type; + if (auto *ComplexTy = OrigType->getAs<ComplexType>()) { + Type = ComplexTy->getElementType(); + } + if (Type->isRealFloatingType()) { + llvm::APFloat InitValue = + llvm::APFloat::getAllOnesValue(Context.getTypeSize(Type), + /*isIEEE=*/true); + Init = FloatingLiteral::Create(Context, InitValue, /*isexact=*/true, + Type, ELoc); + } else if (Type->isScalarType()) { + auto Size = Context.getTypeSize(Type); + QualType IntTy = Context.getIntTypeForBitwidth(Size, /*Signed=*/0); + llvm::APInt InitValue = llvm::APInt::getAllOnesValue(Size); + Init = IntegerLiteral::Create(Context, InitValue, IntTy, ELoc); + } + if (Init && OrigType->isAnyComplexType()) { + // Init = 0xFFFF + 0xFFFFi; + auto *Im = new (Context) ImaginaryLiteral(Init, OrigType); + Init = CreateBuiltinBinOp(ELoc, BO_Add, Init, Im).get(); + } + Type = OrigType; + break; + } + case BO_LT: + case BO_GT: { + // 'min' reduction op - initializer is 'Largest representable number in + // the reduction list item type'. + // 'max' reduction op - initializer is 'Least representable number in + // the reduction list item type'. + if (Type->isIntegerType() || Type->isPointerType()) { + bool IsSigned = Type->hasSignedIntegerRepresentation(); + auto Size = Context.getTypeSize(Type); + QualType IntTy = + Context.getIntTypeForBitwidth(Size, /*Signed=*/IsSigned); + llvm::APInt InitValue = + (BOK != BO_LT) + ? IsSigned ? llvm::APInt::getSignedMinValue(Size) + : llvm::APInt::getMinValue(Size) + : IsSigned ? llvm::APInt::getSignedMaxValue(Size) + : llvm::APInt::getMaxValue(Size); + Init = IntegerLiteral::Create(Context, InitValue, IntTy, ELoc); + if (Type->isPointerType()) { + // Cast to pointer type. + auto CastExpr = BuildCStyleCastExpr( + SourceLocation(), Context.getTrivialTypeSourceInfo(Type, ELoc), + SourceLocation(), Init); + if (CastExpr.isInvalid()) + continue; + Init = CastExpr.get(); } - MarkFunctionReferenced(ELoc, DD); - DiagnoseUseOfDecl(DD, ELoc); + } else if (Type->isRealFloatingType()) { + llvm::APFloat InitValue = llvm::APFloat::getLargest( + Context.getFloatTypeSemantics(Type), BOK != BO_LT); + Init = FloatingLiteral::Create(Context, InitValue, /*isexact=*/true, + Type, ELoc); + } + break; + } + case BO_PtrMemD: + case BO_PtrMemI: + case BO_MulAssign: + case BO_Div: + case BO_Rem: + case BO_Sub: + case BO_Shl: + case BO_Shr: + case BO_LE: + case BO_GE: + case BO_EQ: + case BO_NE: + case BO_AndAssign: + case BO_XorAssign: + case BO_OrAssign: + case BO_Assign: + case BO_AddAssign: + case BO_SubAssign: + case BO_DivAssign: + case BO_RemAssign: + case BO_ShlAssign: + case BO_ShrAssign: + case BO_Comma: + llvm_unreachable("Unexpected reduction operation"); + } + if (Init) { + AddInitializerToDecl(RHSVD, Init, /*DirectInit=*/false, + /*TypeMayContainAuto=*/false); + } else { + ActOnUninitializedDecl(RHSVD, /*TypeMayContainAuto=*/false); + } + if (!RHSVD->hasInit()) { + Diag(ELoc, diag::err_omp_reduction_id_not_compatible) << Type + << ReductionIdRange; + bool IsDecl = + VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly; + Diag(VD->getLocation(), + IsDecl ? 
diag::note_previous_decl : diag::note_defined_here) + << VD; + continue; + } + auto *LHSDRE = buildDeclRefExpr(*this, LHSVD, Type, ELoc); + auto *RHSDRE = buildDeclRefExpr(*this, RHSVD, Type, ELoc); + ExprResult ReductionOp = + BuildBinOp(DSAStack->getCurScope(), ReductionId.getLocStart(), BOK, + LHSDRE, RHSDRE); + if (ReductionOp.isUsable()) { + if (BOK != BO_LT && BOK != BO_GT) { + ReductionOp = + BuildBinOp(DSAStack->getCurScope(), ReductionId.getLocStart(), + BO_Assign, LHSDRE, ReductionOp.get()); + } else { + auto *ConditionalOp = new (Context) ConditionalOperator( + ReductionOp.get(), SourceLocation(), LHSDRE, SourceLocation(), + RHSDRE, Type, VK_LValue, OK_Ordinary); + ReductionOp = + BuildBinOp(DSAStack->getCurScope(), ReductionId.getLocStart(), + BO_Assign, LHSDRE, ConditionalOp); + } + if (ReductionOp.isUsable()) { + ReductionOp = ActOnFinishFullExpr(ReductionOp.get()); } } + if (ReductionOp.isInvalid()) + continue; DSAStack->addDSA(VD, DE, OMPC_reduction); Vars.push_back(DE); + LHSs.push_back(LHSDRE); + RHSs.push_back(RHSDRE); + ReductionOps.push_back(ReductionOp.get()); } if (Vars.empty()) @@ -4980,7 +5758,8 @@ OMPClause *Sema::ActOnOpenMPReductionClause( return OMPReductionClause::Create( Context, StartLoc, LParenLoc, ColonLoc, EndLoc, Vars, - ReductionIdScopeSpec.getWithLocInContext(Context), ReductionId); + ReductionIdScopeSpec.getWithLocInContext(Context), ReductionId, LHSs, + RHSs, ReductionOps); } OMPClause *Sema::ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step, @@ -4989,11 +5768,13 @@ OMPClause *Sema::ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step, SourceLocation ColonLoc, SourceLocation EndLoc) { SmallVector<Expr *, 8> Vars; + SmallVector<Expr *, 8> Inits; for (auto &RefExpr : VarList) { assert(RefExpr && "NULL expr in OpenMP linear clause."); if (isa<DependentScopeDeclRefExpr>(RefExpr)) { // It will be analyzed later. Vars.push_back(RefExpr); + Inits.push_back(nullptr); continue; } @@ -5035,6 +5816,7 @@ OMPClause *Sema::ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step, if (QType->isDependentType() || QType->isInstantiationDependentType()) { // It will be analyzed later. Vars.push_back(DE); + Inits.push_back(nullptr); continue; } @@ -5080,14 +5862,22 @@ OMPClause *Sema::ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step, continue; } + // Build var to save initial value. + VarDecl *Init = buildVarDecl(*this, ELoc, QType, ".linear.start"); + AddInitializerToDecl(Init, DefaultLvalueConversion(DE).get(), + /*DirectInit*/ false, /*TypeMayContainAuto*/ false); + auto InitRef = buildDeclRefExpr( + *this, Init, DE->getType().getUnqualifiedType(), DE->getExprLoc()); DSAStack->addDSA(VD, DE, OMPC_linear); Vars.push_back(DE); + Inits.push_back(InitRef); } if (Vars.empty()) return nullptr; Expr *StepExpr = Step; + Expr *CalcStepExpr = nullptr; if (Step && !Step->isValueDependent() && !Step->isTypeDependent() && !Step->isInstantiationDependent() && !Step->containsUnexpandedParameterPack()) { @@ -5097,17 +5887,82 @@ OMPClause *Sema::ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step, return nullptr; StepExpr = Val.get(); + // Build var to save the step value. 
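// Sketch of the net effect for 'linear(v : s)': '.linear.start' (built above)
// captures v's value on entry to the construct, and '.linear.step' (built
// below) captures 's' once, so each iteration can compute
//   v = .linear.start + iv * step
// without re-evaluating the step expression.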
+ VarDecl *SaveVar = + buildVarDecl(*this, StepLoc, StepExpr->getType(), ".linear.step"); + ExprResult SaveRef = + buildDeclRefExpr(*this, SaveVar, StepExpr->getType(), StepLoc); + ExprResult CalcStep = + BuildBinOp(CurScope, StepLoc, BO_Assign, SaveRef.get(), StepExpr); + // Warn about zero linear step (it would probably be better specified as // making the corresponding variables 'const'). llvm::APSInt Result; - if (StepExpr->isIntegerConstantExpr(Result, Context) && - !Result.isNegative() && !Result.isStrictlyPositive()) + bool IsConstant = StepExpr->isIntegerConstantExpr(Result, Context); + if (IsConstant && !Result.isNegative() && !Result.isStrictlyPositive()) Diag(StepLoc, diag::warn_omp_linear_step_zero) << Vars[0] << (Vars.size() > 1); + if (!IsConstant && CalcStep.isUsable()) { + // Calculate the step beforehand instead of doing this on each iteration. + // (This is not used if the number of iterations may be constant-folded). + CalcStepExpr = CalcStep.get(); + } } return OMPLinearClause::Create(Context, StartLoc, LParenLoc, ColonLoc, EndLoc, - Vars, StepExpr); + Vars, Inits, StepExpr, CalcStepExpr); +} + +static bool FinishOpenMPLinearClause(OMPLinearClause &Clause, DeclRefExpr *IV, + Expr *NumIterations, Sema &SemaRef, + Scope *S) { + // Walk the vars and build update/final expressions for CodeGen. + SmallVector<Expr *, 8> Updates; + SmallVector<Expr *, 8> Finals; + Expr *Step = Clause.getStep(); + Expr *CalcStep = Clause.getCalcStep(); + // OpenMP [2.14.3.7, linear clause] + // If linear-step is not specified it is assumed to be 1. + if (Step == nullptr) + Step = SemaRef.ActOnIntegerConstant(SourceLocation(), 1).get(); + else if (CalcStep) + Step = cast<BinaryOperator>(CalcStep)->getLHS(); + bool HasErrors = false; + auto CurInit = Clause.inits().begin(); + for (auto &RefExpr : Clause.varlists()) { + Expr *InitExpr = *CurInit; + + // Build privatized reference to the current linear var. + auto DE = cast<DeclRefExpr>(RefExpr); + auto PrivateRef = + buildDeclRefExpr(SemaRef, cast<VarDecl>(DE->getDecl()), + DE->getType().getUnqualifiedType(), DE->getExprLoc(), + /*RefersToCapture=*/true); + + // Build update: Var = InitExpr + IV * Step + ExprResult Update = + BuildCounterUpdate(SemaRef, S, RefExpr->getExprLoc(), PrivateRef, + InitExpr, IV, Step, /* Subtract */ false); + Update = SemaRef.ActOnFinishFullExpr(Update.get()); + + // Build final: Var = InitExpr + NumIterations * Step + ExprResult Final = + BuildCounterUpdate(SemaRef, S, RefExpr->getExprLoc(), PrivateRef, + InitExpr, NumIterations, Step, /* Subtract */ false); + Final = SemaRef.ActOnFinishFullExpr(Final.get()); + if (!Update.isUsable() || !Final.isUsable()) { + Updates.push_back(nullptr); + Finals.push_back(nullptr); + HasErrors = true; + } else { + Updates.push_back(Update.get()); + Finals.push_back(Final.get()); + } + ++CurInit; + } + Clause.setUpdates(Updates); + Clause.setFinals(Finals); + return HasErrors; } OMPClause *Sema::ActOnOpenMPAlignedClause( @@ -5137,10 +5992,8 @@ OMPClause *Sema::ActOnOpenMPAlignedClause( // OpenMP [2.8.1, simd construct, Restrictions] // The type of list items appearing in the aligned clause must be // array, pointer, reference to array, or reference to pointer.
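// Illustrative examples for the restriction just cited (a sketch):
//   int *p; int a[16]; int x;
//   #pragma omp simd aligned(p : 16)  // pointer: accepted
//   #pragma omp simd aligned(a)       // array: accepted
//   #pragma omp simd aligned(x)       // scalar: diagnosed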
- QualType QType = DE->getType() - .getNonReferenceType() - .getUnqualifiedType() - .getCanonicalType(); + QualType QType = VD->getType(); + QType = QType.getNonReferenceType().getUnqualifiedType().getCanonicalType(); const Type *Ty = QType.getTypePtrOrNull(); if (!Ty || (!Ty->isDependentType() && !Ty->isArrayType() && !Ty->isPointerType())) { @@ -5190,11 +6043,17 @@ OMPClause *Sema::ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList, SourceLocation LParenLoc, SourceLocation EndLoc) { SmallVector<Expr *, 8> Vars; + SmallVector<Expr *, 8> SrcExprs; + SmallVector<Expr *, 8> DstExprs; + SmallVector<Expr *, 8> AssignmentOps; for (auto &RefExpr : VarList) { assert(RefExpr && "NULL expr in OpenMP copyin clause."); if (isa<DependentScopeDeclRefExpr>(RefExpr)) { // It will be analyzed later. Vars.push_back(RefExpr); + SrcExprs.push_back(nullptr); + DstExprs.push_back(nullptr); + AssignmentOps.push_back(nullptr); continue; } @@ -5216,6 +6075,9 @@ OMPClause *Sema::ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList, if (Type->isDependentType() || Type->isInstantiationDependentType()) { // It will be analyzed later. Vars.push_back(DE); + SrcExprs.push_back(nullptr); + DstExprs.push_back(nullptr); + AssignmentOps.push_back(nullptr); continue; } @@ -5232,40 +6094,38 @@ OMPClause *Sema::ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList, // A variable of class type (or array thereof) that appears in a // copyin clause requires an accessible, unambiguous copy assignment // operator for the class type. - Type = Context.getBaseElementType(Type); - CXXRecordDecl *RD = - getLangOpts().CPlusPlus ? Type->getAsCXXRecordDecl() : nullptr; - // FIXME This code must be replaced by actual assignment of the - // threadprivate variable. - if (RD) { - CXXMethodDecl *MD = LookupCopyingAssignment(RD, 0, false, 0); - DeclAccessPair FoundDecl = DeclAccessPair::make(MD, MD->getAccess()); - if (MD) { - if (CheckMemberAccess(ELoc, RD, FoundDecl) == AR_inaccessible || - MD->isDeleted()) { - Diag(ELoc, diag::err_omp_required_method) - << getOpenMPClauseName(OMPC_copyin) << 2; - bool IsDecl = VD->isThisDeclarationADefinition(Context) == - VarDecl::DeclarationOnly; - Diag(VD->getLocation(), - IsDecl ? diag::note_previous_decl : diag::note_defined_here) - << VD; - Diag(RD->getLocation(), diag::note_previous_decl) << RD; - continue; - } - MarkFunctionReferenced(ELoc, MD); - DiagnoseUseOfDecl(MD, ELoc); - } - } + auto ElemType = Context.getBaseElementType(Type).getNonReferenceType(); + auto *SrcVD = buildVarDecl(*this, DE->getLocStart(), + ElemType.getUnqualifiedType(), ".copyin.src"); + auto *PseudoSrcExpr = buildDeclRefExpr( + *this, SrcVD, ElemType.getUnqualifiedType(), DE->getExprLoc()); + auto *DstVD = + buildVarDecl(*this, DE->getLocStart(), ElemType, ".copyin.dst"); + auto *PseudoDstExpr = + buildDeclRefExpr(*this, DstVD, ElemType, DE->getExprLoc()); + // For arrays generate assignment operation for single element and replace + // it by the original array element in CodeGen. 
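    // Sketch of what is synthesized here (using the names built above): two
    // placeholder variables of the array's element type,
    //   ElemType .copyin.src;   // stands in for the master thread's copy
    //   ElemType .copyin.dst;   // stands in for the current thread's copy
    // plus the single assignment '.copyin.dst = .copyin.src' below, which
    // CodeGen re-materializes once per array element.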
+ auto AssignmentOp = BuildBinOp(/*S=*/nullptr, DE->getExprLoc(), BO_Assign, + PseudoDstExpr, PseudoSrcExpr); + if (AssignmentOp.isInvalid()) + continue; + AssignmentOp = ActOnFinishFullExpr(AssignmentOp.get(), DE->getExprLoc(), + /*DiscardedValue=*/true); + if (AssignmentOp.isInvalid()) + continue; DSAStack->addDSA(VD, DE, OMPC_copyin); Vars.push_back(DE); + SrcExprs.push_back(PseudoSrcExpr); + DstExprs.push_back(PseudoDstExpr); + AssignmentOps.push_back(AssignmentOp.get()); } if (Vars.empty()) return nullptr; - return OMPCopyinClause::Create(Context, StartLoc, LParenLoc, EndLoc, Vars); + return OMPCopyinClause::Create(Context, StartLoc, LParenLoc, EndLoc, Vars, + SrcExprs, DstExprs, AssignmentOps); } OMPClause *Sema::ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList, @@ -5273,11 +6133,17 @@ OMPClause *Sema::ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList, SourceLocation LParenLoc, SourceLocation EndLoc) { SmallVector<Expr *, 8> Vars; + SmallVector<Expr *, 8> SrcExprs; + SmallVector<Expr *, 8> DstExprs; + SmallVector<Expr *, 8> AssignmentOps; for (auto &RefExpr : VarList) { assert(RefExpr && "NULL expr in OpenMP copyprivate clause."); if (isa<DependentScopeDeclRefExpr>(RefExpr)) { // It will be analyzed later. Vars.push_back(RefExpr); + SrcExprs.push_back(nullptr); + DstExprs.push_back(nullptr); + AssignmentOps.push_back(nullptr); continue; } @@ -5299,6 +6165,9 @@ OMPClause *Sema::ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList, if (Type->isDependentType() || Type->isInstantiationDependentType()) { // It will be analyzed later. Vars.push_back(DE); + SrcExprs.push_back(nullptr); + DstExprs.push_back(nullptr); + AssignmentOps.push_back(nullptr); continue; } @@ -5307,8 +6176,8 @@ OMPClause *Sema::ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList, // private or firstprivate clause on the single construct. if (!DSAStack->isThreadPrivate(VD)) { auto DVar = DSAStack->getTopDSA(VD, false); - if (DVar.CKind != OMPC_copyprivate && DVar.CKind != OMPC_unknown && - !(DVar.CKind == OMPC_private && !DVar.RefExpr)) { + if (DVar.CKind != OMPC_unknown && DVar.CKind != OMPC_copyprivate && + DVar.RefExpr) { Diag(ELoc, diag::err_omp_wrong_dsa) << getOpenMPClauseName(DVar.CKind) << getOpenMPClauseName(OMPC_copyprivate); @@ -5331,45 +6200,54 @@ OMPClause *Sema::ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList, } } + // Variably modified types are not supported. + if (!Type->isAnyPointerType() && Type->isVariablyModifiedType()) { + Diag(ELoc, diag::err_omp_variably_modified_type_not_supported) + << getOpenMPClauseName(OMPC_copyprivate) << Type + << getOpenMPDirectiveName(DSAStack->getCurrentDirective()); + bool IsDecl = + VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly; + Diag(VD->getLocation(), + IsDecl ? diag::note_previous_decl : diag::note_defined_here) + << VD; + continue; + } + // OpenMP [2.14.4.1, Restrictions, C/C++, p.2] // A variable of class type (or array thereof) that appears in a // copyin clause requires an accessible, unambiguous copy assignment // operator for the class type. - Type = Context.getBaseElementType(Type); - CXXRecordDecl *RD = - getLangOpts().CPlusPlus ? Type->getAsCXXRecordDecl() : nullptr; - // FIXME This code must be replaced by actual assignment of the - // threadprivate variable. 
- if (RD) { - CXXMethodDecl *MD = LookupCopyingAssignment(RD, 0, false, 0); - DeclAccessPair FoundDecl = DeclAccessPair::make(MD, MD->getAccess()); - if (MD) { - if (CheckMemberAccess(ELoc, RD, FoundDecl) == AR_inaccessible || - MD->isDeleted()) { - Diag(ELoc, diag::err_omp_required_method) - << getOpenMPClauseName(OMPC_copyprivate) << 2; - bool IsDecl = VD->isThisDeclarationADefinition(Context) == - VarDecl::DeclarationOnly; - Diag(VD->getLocation(), - IsDecl ? diag::note_previous_decl : diag::note_defined_here) - << VD; - Diag(RD->getLocation(), diag::note_previous_decl) << RD; - continue; - } - MarkFunctionReferenced(ELoc, MD); - DiagnoseUseOfDecl(MD, ELoc); - } - } + Type = Context.getBaseElementType(Type).getUnqualifiedType(); + auto *SrcVD = + buildVarDecl(*this, DE->getLocStart(), Type, ".copyprivate.src"); + auto *PseudoSrcExpr = + buildDeclRefExpr(*this, SrcVD, Type, DE->getExprLoc()); + auto *DstVD = + buildVarDecl(*this, DE->getLocStart(), Type, ".copyprivate.dst"); + auto *PseudoDstExpr = + buildDeclRefExpr(*this, DstVD, Type, DE->getExprLoc()); + auto AssignmentOp = BuildBinOp(/*S=*/nullptr, DE->getExprLoc(), BO_Assign, + PseudoDstExpr, PseudoSrcExpr); + if (AssignmentOp.isInvalid()) + continue; + AssignmentOp = ActOnFinishFullExpr(AssignmentOp.get(), DE->getExprLoc(), + /*DiscardedValue=*/true); + if (AssignmentOp.isInvalid()) + continue; // No need to mark vars as copyprivate, they are already threadprivate or // implicitly private. Vars.push_back(DE); + SrcExprs.push_back(PseudoSrcExpr); + DstExprs.push_back(PseudoDstExpr); + AssignmentOps.push_back(AssignmentOp.get()); } if (Vars.empty()) return nullptr; - return OMPCopyprivateClause::Create(Context, StartLoc, LParenLoc, EndLoc, Vars); + return OMPCopyprivateClause::Create(Context, StartLoc, LParenLoc, EndLoc, + Vars, SrcExprs, DstExprs, AssignmentOps); } OMPClause *Sema::ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList, diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaOverload.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaOverload.cpp index 9195ee5..9d87a10 100644 --- a/contrib/llvm/tools/clang/lib/Sema/SemaOverload.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/SemaOverload.cpp @@ -286,6 +286,16 @@ StandardConversionSequence::getNarrowingKind(ASTContext &Ctx, QualType FromType = getToType(0); QualType ToType = getToType(1); switch (Second) { + // 'bool' is an integral type; dispatch to the right place to handle it. + case ICK_Boolean_Conversion: + if (FromType->isRealFloatingType()) + goto FloatingIntegralConversion; + if (FromType->isIntegralOrUnscopedEnumerationType()) + goto IntegralConversion; + // Boolean conversions can be from pointers and pointers to members + // [conv.bool], and those aren't considered narrowing conversions. 
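  //   Illustrative cases (assumed examples, not from this change):
  //     void *p = nullptr;
  //     bool b1{p};    // pointer -> bool: not narrowing, handled here
  //     bool b2{1.5};  // floating -> bool: dispatched above, narrowing
  //     bool b3{2};    // integral -> bool: dispatched above, narrowing
  //                    // unless the constant round-trips (0 or 1)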
+ return NK_Not_Narrowing; + // -- from a floating-point type to an integer type, or // // -- from an integer type or unscoped enumeration type to a floating-point @@ -293,6 +303,7 @@ StandardConversionSequence::getNarrowingKind(ASTContext &Ctx, // value after conversion will fit into the target type and will produce // the original value when converted back to the original type, or case ICK_Floating_Integral: + FloatingIntegralConversion: if (FromType->isRealFloatingType() && ToType->isIntegralType(Ctx)) { return NK_Type_Narrowing; } else if (FromType->isIntegralType(Ctx) && ToType->isRealFloatingType()) { @@ -357,13 +368,8 @@ StandardConversionSequence::getNarrowingKind(ASTContext &Ctx, // the source is a constant expression and the actual value after // conversion will fit into the target type and will produce the original // value when converted back to the original type. - case ICK_Boolean_Conversion: // Bools are integers too. - if (!FromType->isIntegralOrUnscopedEnumerationType()) { - // Boolean conversions can be from pointers and pointers to members - // [conv.bool], and those aren't considered narrowing conversions. - return NK_Not_Narrowing; - } // Otherwise, fall through to the integral case. - case ICK_Integral_Conversion: { + case ICK_Integral_Conversion: + IntegralConversion: { assert(FromType->isIntegralOrUnscopedEnumerationType()); assert(ToType->isIntegralOrUnscopedEnumerationType()); const bool FromSigned = FromType->isSignedIntegerOrEnumerationType(); @@ -1098,11 +1104,11 @@ TryUserDefinedConversion(Sema &S, Expr *From, QualType ToType, // Attempt user-defined conversion. OverloadCandidateSet Conversions(From->getExprLoc(), OverloadCandidateSet::CSK_Normal); - OverloadingResult UserDefResult - = IsUserDefinedConversion(S, From, ToType, ICS.UserDefined, Conversions, - AllowExplicit, AllowObjCConversionOnExplicit); - - if (UserDefResult == OR_Success) { + switch (IsUserDefinedConversion(S, From, ToType, ICS.UserDefined, + Conversions, AllowExplicit, + AllowObjCConversionOnExplicit)) { + case OR_Success: + case OR_Deleted: ICS.setUserDefined(); ICS.UserDefined.Before.setAsIdentityConversion(); // C++ [over.ics.user]p4: @@ -1131,7 +1137,9 @@ TryUserDefinedConversion(Sema &S, Expr *From, QualType ToType, ICS.Standard.Second = ICK_Derived_To_Base; } } - } else if (UserDefResult == OR_Ambiguous && !SuppressUserConversions) { + break; + + case OR_Ambiguous: ICS.setAmbiguous(); ICS.Ambiguous.setFromType(From->getType()); ICS.Ambiguous.setToType(ToType); @@ -1139,8 +1147,12 @@ TryUserDefinedConversion(Sema &S, Expr *From, QualType ToType, Cand != Conversions.end(); ++Cand) if (Cand->Viable) ICS.Ambiguous.addConversion(Cand->Function); - } else { + break; + + // Fall through. + case OR_No_Viable_Function: ICS.setBad(BadConversionSequence::no_conversion, From, ToType); + break; } return ICS; @@ -1740,18 +1752,20 @@ bool Sema::IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType) { return false; // We can perform an integral promotion to the underlying type of the enum, - // even if that's not the promoted type. + // even if that's not the promoted type. Note that the check for promoting + // the underlying type is based on the type alone, and does not consider + // the bitfield-ness of the actual source expression. 
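  // For instance (assumed example): with 'enum E : char { e };', E's promoted
  // type is 'int', yet converting E -> 'char' is still classified as an
  // integral promotion through the fixed underlying type. Passing a null
  // expression below makes the recursive check ignore any bit-field source.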
if (FromEnumType->getDecl()->isFixed()) { QualType Underlying = FromEnumType->getDecl()->getIntegerType(); return Context.hasSameUnqualifiedType(Underlying, ToType) || - IsIntegralPromotion(From, Underlying, ToType); + IsIntegralPromotion(nullptr, Underlying, ToType); } // We have already pre-calculated the promotion type, so this is trivial. if (ToType->isIntegerType() && !RequireCompleteType(From->getLocStart(), FromType, 0)) - return Context.hasSameUnqualifiedType(ToType, - FromEnumType->getDecl()->getPromotionType()); + return Context.hasSameUnqualifiedType( + ToType, FromEnumType->getDecl()->getPromotionType()); } // C++0x [conv.prom]p2: @@ -1799,13 +1813,12 @@ bool Sema::IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType) { // other value of that type for promotion purposes (C++ 4.5p3). // FIXME: We should delay checking of bit-fields until we actually perform the // conversion. - using llvm::APSInt; - if (From) + if (From) { if (FieldDecl *MemberDecl = From->getSourceBitField()) { - APSInt BitWidth; + llvm::APSInt BitWidth; if (FromType->isIntegralType(Context) && MemberDecl->getBitWidth()->isIntegerConstantExpr(BitWidth, Context)) { - APSInt ToSize(BitWidth.getBitWidth(), BitWidth.isUnsigned()); + llvm::APSInt ToSize(BitWidth.getBitWidth(), BitWidth.isUnsigned()); ToSize = Context.getTypeSize(ToType); // Are we promoting to an int from a bitfield that fits in an int? @@ -1823,6 +1836,7 @@ bool Sema::IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType) { return false; } } + } // An rvalue of type bool can be converted to an rvalue of type int, // with false becoming zero and true becoming one (C++ 4.5p4). @@ -2940,7 +2954,10 @@ IsInitializerListConstructorConversion(Sema &S, Expr *From, QualType ToType, bool HadMultipleCandidates = (CandidateSet.size() > 1); OverloadCandidateSet::iterator Best; - switch (CandidateSet.BestViableFunction(S, From->getLocStart(), Best, true)) { + switch (auto Result = + CandidateSet.BestViableFunction(S, From->getLocStart(), + Best, true)) { + case OR_Deleted: case OR_Success: { // Record the standard conversion we used and the conversion function. CXXConstructorDecl *Constructor = cast<CXXConstructorDecl>(Best->Function); @@ -2953,13 +2970,11 @@ IsInitializerListConstructorConversion(Sema &S, Expr *From, QualType ToType, User.After.setAsIdentityConversion(); User.After.setFromType(ThisType->getAs<PointerType>()->getPointeeType()); User.After.setAllToTypes(ToType); - return OR_Success; + return Result; } case OR_No_Viable_Function: return OR_No_Viable_Function; - case OR_Deleted: - return OR_Deleted; case OR_Ambiguous: return OR_Ambiguous; } @@ -3093,11 +3108,8 @@ IsUserDefinedConversion(Sema &S, Expr *From, QualType ToType, if (CXXRecordDecl *FromRecordDecl = dyn_cast<CXXRecordDecl>(FromRecordType->getDecl())) { // Add all of the conversion functions as candidates. 
- std::pair<CXXRecordDecl::conversion_iterator, - CXXRecordDecl::conversion_iterator> - Conversions = FromRecordDecl->getVisibleConversionFunctions(); - for (CXXRecordDecl::conversion_iterator - I = Conversions.first, E = Conversions.second; I != E; ++I) { + const auto &Conversions = FromRecordDecl->getVisibleConversionFunctions(); + for (auto I = Conversions.begin(), E = Conversions.end(); I != E; ++I) { DeclAccessPair FoundDecl = I.getPair(); NamedDecl *D = FoundDecl.getDecl(); CXXRecordDecl *ActingContext = cast<CXXRecordDecl>(D->getDeclContext()); @@ -3129,8 +3141,10 @@ IsUserDefinedConversion(Sema &S, Expr *From, QualType ToType, bool HadMultipleCandidates = (CandidateSet.size() > 1); OverloadCandidateSet::iterator Best; - switch (CandidateSet.BestViableFunction(S, From->getLocStart(), Best, true)) { + switch (auto Result = CandidateSet.BestViableFunction(S, From->getLocStart(), + Best, true)) { case OR_Success: + case OR_Deleted: // Record the standard conversion we used and the conversion function. if (CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(Best->Function)) { @@ -3158,7 +3172,7 @@ IsUserDefinedConversion(Sema &S, Expr *From, QualType ToType, User.After.setAsIdentityConversion(); User.After.setFromType(ThisType->getAs<PointerType>()->getPointeeType()); User.After.setAllToTypes(ToType); - return OR_Success; + return Result; } if (CXXConversionDecl *Conversion = dyn_cast<CXXConversionDecl>(Best->Function)) { @@ -3184,15 +3198,12 @@ IsUserDefinedConversion(Sema &S, Expr *From, QualType ToType, // user-defined conversion sequence (see 13.3.3 and // 13.3.3.1). User.After = Best->FinalConversion; - return OR_Success; + return Result; } llvm_unreachable("Not a constructor or conversion function?"); case OR_No_Viable_Function: return OR_No_Viable_Function; - case OR_Deleted: - // No conversion here! We're done. - return OR_Deleted; case OR_Ambiguous: return OR_Ambiguous; @@ -3329,7 +3340,26 @@ CompareImplicitConversionSequences(Sema &S, // Two implicit conversion sequences of the same form are // indistinguishable conversion sequences unless one of the // following rules apply: (C++ 13.3.3.2p3): + + // List-initialization sequence L1 is a better conversion sequence than + // list-initialization sequence L2 if: + // - L1 converts to std::initializer_list<X> for some X and L2 does not, or, + // if not that, + // - L1 converts to type "array of N1 T", L2 converts to type "array of N2 T", + // and N1 is smaller than N2, + // even if one of the other rules in this paragraph would otherwise apply. + if (!ICS1.isBad()) { + if (ICS1.isStdInitializerListElement() && + !ICS2.isStdInitializerListElement()) + return ImplicitConversionSequence::Better; + if (!ICS1.isStdInitializerListElement() && + ICS2.isStdInitializerListElement()) + return ImplicitConversionSequence::Worse; + } + if (ICS1.isStandard()) + // Standard conversion sequence S1 is a better conversion sequence than + // standard conversion sequence S2 if [...] Result = CompareStandardConversionSequences(S, ICS1.Standard, ICS2.Standard); else if (ICS1.isUserDefined()) { @@ -3350,19 +3380,6 @@ CompareImplicitConversionSequences(Sema &S, ICS2.UserDefined.ConversionFunction); } - // List-initialization sequence L1 is a better conversion sequence than - // list-initialization sequence L2 if L1 converts to std::initializer_list<X> - // for some X and L2 does not.
- if (Result == ImplicitConversionSequence::Indistinguishable && - !ICS1.isBad()) { - if (ICS1.isStdInitializerListElement() && - !ICS2.isStdInitializerListElement()) - return ImplicitConversionSequence::Better; - if (!ICS1.isStdInitializerListElement() && - ICS2.isStdInitializerListElement()) - return ImplicitConversionSequence::Worse; - } - return Result; } @@ -4034,11 +4051,8 @@ FindConversionForRefInit(Sema &S, ImplicitConversionSequence &ICS, = dyn_cast<CXXRecordDecl>(T2->getAs<RecordType>()->getDecl()); OverloadCandidateSet CandidateSet(DeclLoc, OverloadCandidateSet::CSK_Normal); - std::pair<CXXRecordDecl::conversion_iterator, - CXXRecordDecl::conversion_iterator> - Conversions = T2RecordDecl->getVisibleConversionFunctions(); - for (CXXRecordDecl::conversion_iterator - I = Conversions.first, E = Conversions.second; I != E; ++I) { + const auto &Conversions = T2RecordDecl->getVisibleConversionFunctions(); + for (auto I = Conversions.begin(), E = Conversions.end(); I != E; ++I) { NamedDecl *D = *I; CXXRecordDecl *ActingDC = cast<CXXRecordDecl>(D->getDeclContext()); if (isa<UsingShadowDecl>(D)) @@ -4254,16 +4268,6 @@ TryReferenceInit(Sema &S, Expr *Init, QualType DeclType, // -- Otherwise, the reference shall be an lvalue reference to a // non-volatile const type (i.e., cv1 shall be const), or the reference // shall be an rvalue reference. - // - // We actually handle one oddity of C++ [over.ics.ref] at this - // point, which is that, due to p2 (which short-circuits reference - // binding by only attempting a simple conversion for non-direct - // bindings) and p3's strange wording, we allow a const volatile - // reference to bind to an rvalue. Hence the check for the presence - // of "const" rather than checking for "const" being the only - // qualifier. - // This is also the point where rvalue references and lvalue inits no longer - // go together. if (!isRValRef && (!T1.isConstQualified() || T1.isVolatileQualified())) return ICS; @@ -4456,11 +4460,57 @@ TryListConversion(Sema &S, InitListExpr *From, QualType ToType, if (S.RequireCompleteType(From->getLocStart(), ToType, 0)) return Result; + // Per DR1467: + // If the parameter type is a class X and the initializer list has a single + // element of type cv U, where U is X or a class derived from X, the + // implicit conversion sequence is the one required to convert the element + // to the parameter type. + // + // Otherwise, if the parameter type is a character array [... ] + // and the initializer list has a single element that is an + // appropriately-typed string literal (8.5.2 [dcl.init.string]), the + // implicit conversion sequence is the identity conversion. + if (From->getNumInits() == 1) { + if (ToType->isRecordType()) { + QualType InitType = From->getInit(0)->getType(); + if (S.Context.hasSameUnqualifiedType(InitType, ToType) || + S.IsDerivedFrom(InitType, ToType)) + return TryCopyInitialization(S, From->getInit(0), ToType, + SuppressUserConversions, + InOverloadResolution, + AllowObjCWritebackConversion); + } + // FIXME: Check the other conditions here: array of character type, + // initializer is a string literal. 
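  // The two cases in question look like this (illustrative):
  //   struct X {}; void f(X);
  //   X x; f({x});                  // one element of type X: convert the
  //                                 // element itself, per DR1467
  //   void g(const char (&)[6]);
  //   g({"hello"});                 // braced string literal: identity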
+ if (ToType->isArrayType()) { + InitializedEntity Entity = + InitializedEntity::InitializeParameter(S.Context, ToType, + /*Consumed=*/false); + if (S.CanPerformCopyInitialization(Entity, From)) { + Result.setStandard(); + Result.Standard.setAsIdentityConversion(); + Result.Standard.setFromType(ToType); + Result.Standard.setAllToTypes(ToType); + return Result; + } + } + } + + // C++14 [over.ics.list]p2: Otherwise, if the parameter type [...] (below). // C++11 [over.ics.list]p2: // If the parameter type is std::initializer_list<X> or "array of X" and // all the elements can be implicitly converted to X, the implicit // conversion sequence is the worst conversion necessary to convert an // element of the list to X. + // + // C++14 [over.ics.list]p3: + // Otherwise, if the parameter type is "array of N X", if the initializer + // list has exactly N elements or if it has fewer than N elements and X is + // default-constructible, and if all the elements of the initializer list + // can be implicitly converted to X, the implicit conversion sequence is + // the worst conversion necessary to convert an element of the list to X. + // + // FIXME: We're missing a lot of these checks. bool toStdInitializerList = false; QualType X; if (ToType->isArrayType()) @@ -4499,6 +4549,7 @@ TryListConversion(Sema &S, InitListExpr *From, QualType ToType, return Result; } + // C++14 [over.ics.list]p4: // C++11 [over.ics.list]p3: // Otherwise, if the parameter is a non-aggregate class X and overload // resolution chooses a single best constructor [...] the implicit @@ -4514,6 +4565,7 @@ TryListConversion(Sema &S, InitListExpr *From, QualType ToType, /*AllowObjCConversionOnExplicit=*/false); } + // C++14 [over.ics.list]p5: // C++11 [over.ics.list]p4: // Otherwise, if the parameter has an aggregate type which can be // initialized from the initializer list [...] the implicit conversion @@ -4540,6 +4592,7 @@ TryListConversion(Sema &S, InitListExpr *From, QualType ToType, return Result; } + // C++14 [over.ics.list]p6: // C++11 [over.ics.list]p5: // Otherwise, if the parameter is a reference, see 13.3.3.1.4. if (ToType->isReferenceType()) { @@ -4608,14 +4661,15 @@ TryListConversion(Sema &S, InitListExpr *From, QualType ToType, return Result; } + // C++14 [over.ics.list]p7: // C++11 [over.ics.list]p6: // Otherwise, if the parameter type is not a class: if (!ToType->isRecordType()) { - // - if the initializer list has one element, the implicit conversion - // sequence is the one required to convert the element to the - // parameter type. + // - if the initializer list has one element that is not itself an + // initializer list, the implicit conversion sequence is the one + // required to convert the element to the parameter type. unsigned NumInits = From->getNumInits(); - if (NumInits == 1) + if (NumInits == 1 && !isa<InitListExpr>(From->getInit(0))) Result = TryCopyInitialization(S, From->getInit(0), ToType, SuppressUserConversions, InOverloadResolution, @@ -4631,6 +4685,7 @@ TryListConversion(Sema &S, InitListExpr *From, QualType ToType, return Result; } + // C++14 [over.ics.list]p8: // C++11 [over.ics.list]p7: // In all cases other than those enumerated above, no conversion is possible return Result; @@ -5347,21 +5402,18 @@ ExprResult Sema::PerformContextualImplicitConversion( UnresolvedSet<4> ViableConversions; // These are *potentially* viable in C++1y. 
UnresolvedSet<4> ExplicitConversions; - std::pair<CXXRecordDecl::conversion_iterator, - CXXRecordDecl::conversion_iterator> Conversions = + const auto &Conversions = cast<CXXRecordDecl>(RecordTy->getDecl())->getVisibleConversionFunctions(); bool HadMultipleCandidates = - (std::distance(Conversions.first, Conversions.second) > 1); + (std::distance(Conversions.begin(), Conversions.end()) > 1); // To check that there is only one target type, in C++1y: QualType ToType; bool HasUniqueTargetType = true; // Collect explicit or viable (potentially in C++1y) conversions. - for (CXXRecordDecl::conversion_iterator I = Conversions.first, - E = Conversions.second; - I != E; ++I) { + for (auto I = Conversions.begin(), E = Conversions.end(); I != E; ++I) { NamedDecl *D = (*I)->getUnderlyingDecl(); CXXConversionDecl *Conversion; FunctionTemplateDecl *ConvTemplate = dyn_cast<FunctionTemplateDecl>(D); @@ -5554,7 +5606,8 @@ Sema::AddOverloadCandidate(FunctionDecl *Function, // is irrelevant. AddMethodCandidate(Method, FoundDecl, Method->getParent(), QualType(), Expr::Classification::makeSimpleLValue(), - Args, CandidateSet, SuppressUserConversions); + Args, CandidateSet, SuppressUserConversions, + PartialOverloading); return; } // We treat a constructor like a non-member function, since its object @@ -5615,7 +5668,7 @@ Sema::AddOverloadCandidate(FunctionDecl *Function, // (C++ 13.3.2p2): A candidate function having fewer than m // parameters is viable only if it has an ellipsis in its parameter // list (8.3.5). - if ((Args.size() + (PartialOverloading && Args.size())) > NumParams && + if (TooManyArguments(NumParams, Args.size(), PartialOverloading) && !Proto->isVariadic()) { Candidate.Viable = false; Candidate.FailureKind = ovl_fail_too_many_arguments; @@ -5850,8 +5903,9 @@ EnableIfAttr *Sema::CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args, void Sema::AddFunctionCandidates(const UnresolvedSetImpl &Fns, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, + TemplateArgumentListInfo *ExplicitTemplateArgs, bool SuppressUserConversions, - TemplateArgumentListInfo *ExplicitTemplateArgs) { + bool PartialOverloading) { for (UnresolvedSetIterator F = Fns.begin(), E = Fns.end(); F != E; ++F) { NamedDecl *D = F.getDecl()->getUnderlyingDecl(); if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { @@ -5860,10 +5914,10 @@ void Sema::AddFunctionCandidates(const UnresolvedSetImpl &Fns, cast<CXXMethodDecl>(FD)->getParent(), Args[0]->getType(), Args[0]->Classify(Context), Args.slice(1), CandidateSet, - SuppressUserConversions); + SuppressUserConversions, PartialOverloading); else AddOverloadCandidate(FD, F.getPair(), Args, CandidateSet, - SuppressUserConversions); + SuppressUserConversions, PartialOverloading); } else { FunctionTemplateDecl *FunTmpl = cast<FunctionTemplateDecl>(D); if (isa<CXXMethodDecl>(FunTmpl->getTemplatedDecl()) && @@ -5873,11 +5927,13 @@ void Sema::AddFunctionCandidates(const UnresolvedSetImpl &Fns, ExplicitTemplateArgs, Args[0]->getType(), Args[0]->Classify(Context), Args.slice(1), - CandidateSet, SuppressUserConversions); + CandidateSet, SuppressUserConversions, + PartialOverloading); else AddTemplateOverloadCandidate(FunTmpl, F.getPair(), ExplicitTemplateArgs, Args, - CandidateSet, SuppressUserConversions); + CandidateSet, SuppressUserConversions, + PartialOverloading); } } } @@ -5925,7 +5981,8 @@ Sema::AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, - bool 
SuppressUserConversions) { + bool SuppressUserConversions, + bool PartialOverloading) { const FunctionProtoType *Proto = dyn_cast<FunctionProtoType>(Method->getType()->getAs<FunctionType>()); assert(Proto && "Methods without a prototype cannot be overloaded"); @@ -5958,7 +6015,8 @@ Sema::AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl, // (C++ 13.3.2p2): A candidate function having fewer than m // parameters is viable only if it has an ellipsis in its parameter // list (8.3.5). - if (Args.size() > NumParams && !Proto->isVariadic()) { + if (TooManyArguments(NumParams, Args.size(), PartialOverloading) && + !Proto->isVariadic()) { Candidate.Viable = false; Candidate.FailureKind = ovl_fail_too_many_arguments; return; @@ -5970,7 +6028,7 @@ Sema::AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl, // parameter list is truncated on the right, so that there are // exactly m parameters. unsigned MinRequiredArgs = Method->getMinRequiredArguments(); - if (Args.size() < MinRequiredArgs) { + if (Args.size() < MinRequiredArgs && !PartialOverloading) { // Not enough arguments. Candidate.Viable = false; Candidate.FailureKind = ovl_fail_too_few_arguments; @@ -6052,7 +6110,8 @@ Sema::AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, - bool SuppressUserConversions) { + bool SuppressUserConversions, + bool PartialOverloading) { if (!CandidateSet.isNewCandidate(MethodTmpl)) return; @@ -6069,7 +6128,7 @@ Sema::AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl, FunctionDecl *Specialization = nullptr; if (TemplateDeductionResult Result = DeduceTemplateArguments(MethodTmpl, ExplicitTemplateArgs, Args, - Specialization, Info)) { + Specialization, Info, PartialOverloading)) { OverloadCandidate &Candidate = CandidateSet.addCandidate(); Candidate.FoundDecl = FoundDecl; Candidate.Function = MethodTmpl->getTemplatedDecl(); @@ -6090,7 +6149,7 @@ Sema::AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl, "Specialization is not a member function?"); AddMethodCandidate(cast<CXXMethodDecl>(Specialization), FoundDecl, ActingContext, ObjectType, ObjectClassification, Args, - CandidateSet, SuppressUserConversions); + CandidateSet, SuppressUserConversions, PartialOverloading); } /// \brief Add a C++ function template specialization as a candidate @@ -6102,7 +6161,8 @@ Sema::AddTemplateOverloadCandidate(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, - bool SuppressUserConversions) { + bool SuppressUserConversions, + bool PartialOverloading) { if (!CandidateSet.isNewCandidate(FunctionTemplate)) return; @@ -6119,7 +6179,7 @@ Sema::AddTemplateOverloadCandidate(FunctionTemplateDecl *FunctionTemplate, FunctionDecl *Specialization = nullptr; if (TemplateDeductionResult Result = DeduceTemplateArguments(FunctionTemplate, ExplicitTemplateArgs, Args, - Specialization, Info)) { + Specialization, Info, PartialOverloading)) { OverloadCandidate &Candidate = CandidateSet.addCandidate(); Candidate.FoundDecl = FoundDecl; Candidate.Function = FunctionTemplate->getTemplatedDecl(); @@ -6137,7 +6197,7 @@ Sema::AddTemplateOverloadCandidate(FunctionTemplateDecl *FunctionTemplate, // deduction as a candidate. 
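  // (PartialOverloading is presumably driven by callers such as code
  // completion, where a partial call like 'foo(a, ' should still surface
  // candidates whose remaining parameters have no arguments yet.)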
assert(Specialization && "Missing function template specialization?"); AddOverloadCandidate(Specialization, FoundDecl, Args, CandidateSet, - SuppressUserConversions); + SuppressUserConversions, PartialOverloading); } /// Determine whether this is an allowable conversion from the result @@ -6884,12 +6944,7 @@ BuiltinCandidateTypeSet::AddTypesConvertedFrom(QualType Ty, return; CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(TyRec->getDecl()); - std::pair<CXXRecordDecl::conversion_iterator, - CXXRecordDecl::conversion_iterator> - Conversions = ClassDecl->getVisibleConversionFunctions(); - for (CXXRecordDecl::conversion_iterator - I = Conversions.first, E = Conversions.second; I != E; ++I) { - NamedDecl *D = I.getDecl(); + for (NamedDecl *D : ClassDecl->getVisibleConversionFunctions()) { if (isa<UsingShadowDecl>(D)) D = cast<UsingShadowDecl>(D)->getTargetDecl(); @@ -6953,13 +7008,7 @@ static Qualifiers CollectVRQualifiers(ASTContext &Context, Expr* ArgExpr) { if (!ClassDecl->hasDefinition()) return VRQuals; - std::pair<CXXRecordDecl::conversion_iterator, - CXXRecordDecl::conversion_iterator> - Conversions = ClassDecl->getVisibleConversionFunctions(); - - for (CXXRecordDecl::conversion_iterator - I = Conversions.first, E = Conversions.second; I != E; ++I) { - NamedDecl *D = I.getDecl(); + for (NamedDecl *D : ClassDecl->getVisibleConversionFunctions()) { if (isa<UsingShadowDecl>(D)) D = cast<UsingShadowDecl>(D)->getTargetDecl(); if (CXXConversionDecl *Conv = dyn_cast<CXXConversionDecl>(D)) { @@ -8078,7 +8127,7 @@ void Sema::AddBuiltinOperatorCandidates(OverloadedOperatorKind Op, bool HasArithmeticOrEnumeralCandidateType = false; SmallVector<BuiltinCandidateTypeSet, 2> CandidateTypes; for (unsigned ArgIdx = 0, N = Args.size(); ArgIdx != N; ++ArgIdx) { - CandidateTypes.push_back(BuiltinCandidateTypeSet(*this)); + CandidateTypes.emplace_back(*this); CandidateTypes[ArgIdx].AddTypesConvertedFrom(Args[ArgIdx]->getType(), OpLoc, true, @@ -8291,7 +8340,7 @@ Sema::AddArgumentDependentLookupCandidates(DeclarationName Name, } else AddTemplateOverloadCandidate(cast<FunctionTemplateDecl>(*I), FoundDecl, ExplicitTemplateArgs, - Args, CandidateSet); + Args, CandidateSet, PartialOverloading); } } @@ -9451,10 +9500,7 @@ struct CompareOverloadCandidatesForDisplay { numLFixes = (numLFixes == 0) ? UINT_MAX : numLFixes; numRFixes = (numRFixes == 0) ? UINT_MAX : numRFixes; if (numLFixes != numRFixes) { - if (numLFixes < numRFixes) - return true; - else - return false; + return numLFixes < numRFixes; } // If there's any ordering between the defined conversions... @@ -10363,7 +10409,8 @@ static void AddOverloadedCallCandidate(Sema &S, assert(!KnownValid && "Explicit template arguments?"); return; } - S.AddOverloadCandidate(Func, FoundDecl, Args, CandidateSet, false, + S.AddOverloadCandidate(Func, FoundDecl, Args, CandidateSet, + /*SuppressUserConversions=*/false, PartialOverloading); return; } @@ -10371,7 +10418,9 @@ static void AddOverloadedCallCandidate(Sema &S, if (FunctionTemplateDecl *FuncTemplate = dyn_cast<FunctionTemplateDecl>(Callee)) { S.AddTemplateOverloadCandidate(FuncTemplate, FoundDecl, - ExplicitTemplateArgs, Args, CandidateSet, + ExplicitTemplateArgs, Args, CandidateSet, + /*SuppressUserConversions=*/false, + PartialOverloading); return; } @@ -10896,7 +10945,7 @@ Sema::CreateOverloadedUnaryOp(SourceLocation OpLoc, unsigned OpcIn, OverloadCandidateSet CandidateSet(OpLoc, OverloadCandidateSet::CSK_Operator); // Add the candidates from the given function set.
- AddFunctionCandidates(Fns, ArgsArray, CandidateSet, false); + AddFunctionCandidates(Fns, ArgsArray, CandidateSet); // Add operator candidates that are member functions. AddMemberOperatorCandidates(Op, OpLoc, ArgsArray, CandidateSet); @@ -11105,7 +11154,7 @@ Sema::CreateOverloadedBinOp(SourceLocation OpLoc, OverloadCandidateSet CandidateSet(OpLoc, OverloadCandidateSet::CSK_Operator); // Add the candidates from the given function set. - AddFunctionCandidates(Fns, Args, CandidateSet, false); + AddFunctionCandidates(Fns, Args, CandidateSet); // Add operator candidates that are member functions. AddMemberOperatorCandidates(Op, OpLoc, Args, CandidateSet); @@ -11546,6 +11595,10 @@ Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE, return MaybeBindToTemporary(call); } + if (isa<CXXPseudoDestructorExpr>(NakedMemExpr)) + return new (Context) + CallExpr(Context, MemExprE, Args, Context.VoidTy, VK_RValue, RParenLoc); + UnbridgedCastsSet UnbridgedCasts; if (checkArgPlaceholdersForOverload(*this, Args, UnbridgedCasts)) return ExprError(); @@ -11805,11 +11858,9 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj, // functions for each conversion function declared in an // accessible base class provided the function is not hidden // within T by another intervening declaration. - std::pair<CXXRecordDecl::conversion_iterator, - CXXRecordDecl::conversion_iterator> Conversions - = cast<CXXRecordDecl>(Record->getDecl())->getVisibleConversionFunctions(); - for (CXXRecordDecl::conversion_iterator - I = Conversions.first, E = Conversions.second; I != E; ++I) { + const auto &Conversions = + cast<CXXRecordDecl>(Record->getDecl())->getVisibleConversionFunctions(); + for (auto I = Conversions.begin(), E = Conversions.end(); I != E; ++I) { NamedDecl *D = *I; CXXRecordDecl *ActingContext = cast<CXXRecordDecl>(D->getDeclContext()); if (isa<UsingShadowDecl>(D)) @@ -12154,8 +12205,8 @@ ExprResult Sema::BuildLiteralOperatorCall(LookupResult &R, OverloadCandidateSet CandidateSet(UDSuffixLoc, OverloadCandidateSet::CSK_Normal); - AddFunctionCandidates(R.asUnresolvedSet(), Args, CandidateSet, true, - TemplateArgs); + AddFunctionCandidates(R.asUnresolvedSet(), Args, CandidateSet, TemplateArgs, + /*SuppressUserConversions=*/true); bool HadMultipleCandidates = (CandidateSet.size() > 1); @@ -12440,15 +12491,11 @@ Expr *Sema::FixOverloadedFunctionReference(Expr *E, DeclAccessPair Found, type = Context.BoundMemberTy; } - MemberExpr *ME = MemberExpr::Create(Context, Base, - MemExpr->isArrow(), - MemExpr->getQualifierLoc(), - MemExpr->getTemplateKeywordLoc(), - Fn, - Found, - MemExpr->getMemberNameInfo(), - TemplateArgs, - type, valueKind, OK_Ordinary); + MemberExpr *ME = MemberExpr::Create( + Context, Base, MemExpr->isArrow(), MemExpr->getOperatorLoc(), + MemExpr->getQualifierLoc(), MemExpr->getTemplateKeywordLoc(), Fn, Found, + MemExpr->getMemberNameInfo(), TemplateArgs, type, valueKind, + OK_Ordinary); ME->setHadMultipleCandidates(true); MarkMemberReferenced(ME); return ME; diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaPseudoObject.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaPseudoObject.cpp index 5e92d5d..3e465af 100644 --- a/contrib/llvm/tools/clang/lib/Sema/SemaPseudoObject.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/SemaPseudoObject.cpp @@ -1051,17 +1051,13 @@ Sema::ObjCSubscriptKind // Look for a conversion to an integral, enumeration type, or // objective-C pointer type. 
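  // e.g. (illustrative) for 'obj[key]' where 'key' has C++ class type:
  //   struct IKey { operator NSUInteger() const; };  // array-style subscript
  //   struct OKey { operator id() const; };          // dictionary-style
  // The integral vs. Objective-C pointer counts below select between the two
  // subscript flavors and diagnose ambiguous keys.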
- std::pair<CXXRecordDecl::conversion_iterator, - CXXRecordDecl::conversion_iterator> Conversions - = cast<CXXRecordDecl>(RecordTy->getDecl())->getVisibleConversionFunctions(); - int NoIntegrals=0, NoObjCIdPointers=0; SmallVector<CXXConversionDecl *, 4> ConversionDecls; - - for (CXXRecordDecl::conversion_iterator - I = Conversions.first, E = Conversions.second; I != E; ++I) { - if (CXXConversionDecl *Conversion - = dyn_cast<CXXConversionDecl>((*I)->getUnderlyingDecl())) { + + for (NamedDecl *D : cast<CXXRecordDecl>(RecordTy->getDecl()) + ->getVisibleConversionFunctions()) { + if (CXXConversionDecl *Conversion = + dyn_cast<CXXConversionDecl>(D->getUnderlyingDecl())) { QualType CT = Conversion->getConversionType().getNonReferenceType(); if (CT->isIntegralOrEnumerationType()) { ++NoIntegrals; @@ -1196,7 +1192,7 @@ bool ObjCSubscriptOpBuilder::findAtIndexGetter() { AtIndexGetter = S.LookupInstanceMethodInGlobalPool(AtIndexGetterSelector, RefExpr->getSourceRange(), - true, false); + true); } if (AtIndexGetter) { @@ -1318,7 +1314,7 @@ bool ObjCSubscriptOpBuilder::findAtIndexSetter() { AtIndexSetter = S.LookupInstanceMethodInGlobalPool(AtIndexSetterSelector, RefExpr->getSourceRange(), - true, false); + true); } bool err = false; @@ -1446,7 +1442,7 @@ ExprResult MSPropertyOpBuilder::buildGet() { ExprResult GetterExpr = S.ActOnMemberAccessExpr( S.getCurScope(), RefExpr->getBaseExpr(), SourceLocation(), RefExpr->isArrow() ? tok::arrow : tok::period, SS, SourceLocation(), - GetterName, nullptr, true); + GetterName, nullptr); if (GetterExpr.isInvalid()) { S.Diag(RefExpr->getMemberLoc(), diag::error_cannot_find_suitable_accessor) << 0 /* getter */ @@ -1476,7 +1472,7 @@ ExprResult MSPropertyOpBuilder::buildSet(Expr *op, SourceLocation sl, ExprResult SetterExpr = S.ActOnMemberAccessExpr( S.getCurScope(), RefExpr->getBaseExpr(), SourceLocation(), RefExpr->isArrow() ? tok::arrow : tok::period, SS, SourceLocation(), - SetterName, nullptr, true); + SetterName, nullptr); if (SetterExpr.isInvalid()) { S.Diag(RefExpr->getMemberLoc(), diag::error_cannot_find_suitable_accessor) << 1 /* setter */ diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaStmt.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaStmt.cpp index 0d1da45..5c72529 100644 --- a/contrib/llvm/tools/clang/lib/Sema/SemaStmt.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/SemaStmt.cpp @@ -15,6 +15,7 @@ #include "clang/AST/ASTContext.h" #include "clang/AST/ASTDiagnostic.h" #include "clang/AST/CharUnits.h" +#include "clang/AST/CXXInheritance.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/EvaluatedExprVisitor.h" #include "clang/AST/ExprCXX.h" @@ -23,12 +24,14 @@ #include "clang/AST/StmtCXX.h" #include "clang/AST/StmtObjC.h" #include "clang/AST/TypeLoc.h" +#include "clang/AST/TypeOrdering.h" #include "clang/Lex/Preprocessor.h" #include "clang/Sema/Initialization.h" #include "clang/Sema/Lookup.h" #include "clang/Sema/Scope.h" #include "clang/Sema/ScopeInfo.h" #include "llvm/ADT/ArrayRef.h" +#include "llvm/ADT/DenseMap.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallString.h" @@ -236,7 +239,9 @@ void Sema::DiagnoseUnusedExprResult(const Stmt *S) { // is written in a macro body, only warn if it has the warn_unused_result // attribute. if (const Decl *FD = CE->getCalleeDecl()) { - if (FD->hasAttr<WarnUnusedResultAttr>()) { + const FunctionDecl *Func = dyn_cast<FunctionDecl>(FD); + if (Func ? 
Func->hasUnusedResultAttr() + : FD->hasAttr<WarnUnusedResultAttr>()) { Diag(Loc, diag::warn_unused_result) << R1 << R2; return; } @@ -265,10 +270,6 @@ void Sema::DiagnoseUnusedExprResult(const Stmt *S) { Diag(Loc, diag::warn_unused_result) << R1 << R2; return; } - if (MD->isPropertyAccessor()) { - Diag(Loc, diag::warn_unused_property_expr); - return; - } } } else if (const PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(E)) { const Expr *Source = POE->getSyntacticForm(); @@ -687,26 +688,39 @@ static void checkCaseValue(Sema &S, SourceLocation Loc, const llvm::APSInt &Val, } } +typedef SmallVector<std::pair<llvm::APSInt, EnumConstantDecl*>, 64> EnumValsTy; + /// Returns true if we should emit a diagnostic about this case expression not /// being a part of the enum used in the switch controlling expression. -static bool ShouldDiagnoseSwitchCaseNotInEnum(const ASTContext &Ctx, +static bool ShouldDiagnoseSwitchCaseNotInEnum(const Sema &S, const EnumDecl *ED, - const Expr *CaseExpr) { - // Don't warn if the 'case' expression refers to a static const variable of - // the enum type. - CaseExpr = CaseExpr->IgnoreParenImpCasts(); - if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(CaseExpr)) { + const Expr *CaseExpr, + EnumValsTy::iterator &EI, + EnumValsTy::iterator &EIEnd, + const llvm::APSInt &Val) { + bool FlagType = ED->hasAttr<FlagEnumAttr>(); + + if (const DeclRefExpr *DRE = + dyn_cast<DeclRefExpr>(CaseExpr->IgnoreParenImpCasts())) { if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) { - if (!VD->hasGlobalStorage()) - return true; QualType VarType = VD->getType(); - if (!VarType.isConstQualified()) - return true; - QualType EnumType = Ctx.getTypeDeclType(ED); - if (Ctx.hasSameUnqualifiedType(EnumType, VarType)) + QualType EnumType = S.Context.getTypeDeclType(ED); + if (VD->hasGlobalStorage() && VarType.isConstQualified() && + S.Context.hasSameUnqualifiedType(EnumType, VarType)) return false; } } + + if (FlagType) { + return !S.IsValueInFlagEnum(ED, Val, false); + } else { + while (EI != EIEnd && EI->first < Val) + EI++; + + if (EI != EIEnd && EI->first == Val) + return false; + } + return true; } @@ -897,12 +911,12 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch, if (PrevString == CurrString) Diag(CaseVals[i].second->getLHS()->getLocStart(), diag::err_duplicate_case) << - (PrevString.empty() ? CaseValStr.str() : PrevString); + (PrevString.empty() ? StringRef(CaseValStr) : PrevString); else Diag(CaseVals[i].second->getLHS()->getLocStart(), diag::err_duplicate_case_differing_expr) << - (PrevString.empty() ? CaseValStr.str() : PrevString) << - (CurrString.empty() ? CaseValStr.str() : CurrString) << + (PrevString.empty() ? StringRef(CaseValStr) : PrevString) << + (CurrString.empty() ? StringRef(CaseValStr) : CurrString) << CaseValStr; Diag(CaseVals[i-1].second->getLHS()->getLocStart(), @@ -1046,8 +1060,6 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch, // If switch has default case, then ignore it. 
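  // (For the flag-enum path in ShouldDiagnoseSwitchCaseNotInEnum above, an
  // assumed example: with 'enum __attribute__((flag_enum)) F { A = 1, B = 2 };',
  // 'case A | B:' carries the value 3, a valid combination of flags, and no
  // longer triggers the 'not in enum' warning.)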
if (!CaseListIsErroneous && !HasConstantCond && ET) { const EnumDecl *ED = ET->getDecl(); - typedef SmallVector<std::pair<llvm::APSInt, EnumConstantDecl*>, 64> - EnumValsTy; EnumValsTy EnumVals; // Gather all enum values, set their type and sort them, @@ -1058,57 +1070,48 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch, EnumVals.push_back(std::make_pair(Val, EDI)); } std::stable_sort(EnumVals.begin(), EnumVals.end(), CmpEnumVals); - EnumValsTy::iterator EIend = + auto EI = EnumVals.begin(), EIEnd = std::unique(EnumVals.begin(), EnumVals.end(), EqEnumVals); // See which case values aren't in enum. - EnumValsTy::const_iterator EI = EnumVals.begin(); for (CaseValsTy::const_iterator CI = CaseVals.begin(); - CI != CaseVals.end(); CI++) { - while (EI != EIend && EI->first < CI->first) - EI++; - if (EI == EIend || EI->first > CI->first) { - Expr *CaseExpr = CI->second->getLHS(); - if (ShouldDiagnoseSwitchCaseNotInEnum(Context, ED, CaseExpr)) - Diag(CaseExpr->getExprLoc(), diag::warn_not_in_enum) - << CondTypeBeforePromotion; - } + CI != CaseVals.end(); CI++) { + Expr *CaseExpr = CI->second->getLHS(); + if (ShouldDiagnoseSwitchCaseNotInEnum(*this, ED, CaseExpr, EI, EIEnd, + CI->first)) + Diag(CaseExpr->getExprLoc(), diag::warn_not_in_enum) + << CondTypeBeforePromotion; } + // See which of case ranges aren't in enum EI = EnumVals.begin(); for (CaseRangesTy::const_iterator RI = CaseRanges.begin(); - RI != CaseRanges.end() && EI != EIend; RI++) { - while (EI != EIend && EI->first < RI->first) - EI++; - - if (EI == EIend || EI->first != RI->first) { - Expr *CaseExpr = RI->second->getLHS(); - if (ShouldDiagnoseSwitchCaseNotInEnum(Context, ED, CaseExpr)) - Diag(CaseExpr->getExprLoc(), diag::warn_not_in_enum) - << CondTypeBeforePromotion; - } + RI != CaseRanges.end(); RI++) { + Expr *CaseExpr = RI->second->getLHS(); + if (ShouldDiagnoseSwitchCaseNotInEnum(*this, ED, CaseExpr, EI, EIEnd, + RI->first)) + Diag(CaseExpr->getExprLoc(), diag::warn_not_in_enum) + << CondTypeBeforePromotion; llvm::APSInt Hi = RI->second->getRHS()->EvaluateKnownConstInt(Context); AdjustAPSInt(Hi, CondWidth, CondIsSigned); - while (EI != EIend && EI->first < Hi) - EI++; - if (EI == EIend || EI->first != Hi) { - Expr *CaseExpr = RI->second->getRHS(); - if (ShouldDiagnoseSwitchCaseNotInEnum(Context, ED, CaseExpr)) - Diag(CaseExpr->getExprLoc(), diag::warn_not_in_enum) - << CondTypeBeforePromotion; - } + + CaseExpr = RI->second->getRHS(); + if (ShouldDiagnoseSwitchCaseNotInEnum(*this, ED, CaseExpr, EI, EIEnd, + Hi)) + Diag(CaseExpr->getExprLoc(), diag::warn_not_in_enum) + << CondTypeBeforePromotion; } // Check which enum vals aren't in switch - CaseValsTy::const_iterator CI = CaseVals.begin(); - CaseRangesTy::const_iterator RI = CaseRanges.begin(); + auto CI = CaseVals.begin(); + auto RI = CaseRanges.begin(); bool hasCasesNotInSwitch = false; SmallVector<DeclarationName,8> UnhandledNames; - for (EI = EnumVals.begin(); EI != EIend; EI++){ + for (EI = EnumVals.begin(); EI != EIEnd; EI++){ // Drop unneeded case values while (CI != CaseVals.end() && CI->first < EI->first) CI++; @@ -1135,29 +1138,15 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch, Diag(TheDefaultStmt->getDefaultLoc(), diag::warn_unreachable_default); // Produce a nice diagnostic if multiple values aren't handled. - switch (UnhandledNames.size()) { - case 0: break; - case 1: - Diag(CondExpr->getExprLoc(), TheDefaultStmt - ? 
diag::warn_def_missing_case1 : diag::warn_missing_case1) - << UnhandledNames[0]; - break; - case 2: - Diag(CondExpr->getExprLoc(), TheDefaultStmt - ? diag::warn_def_missing_case2 : diag::warn_missing_case2) - << UnhandledNames[0] << UnhandledNames[1]; - break; - case 3: - Diag(CondExpr->getExprLoc(), TheDefaultStmt - ? diag::warn_def_missing_case3 : diag::warn_missing_case3) - << UnhandledNames[0] << UnhandledNames[1] << UnhandledNames[2]; - break; - default: - Diag(CondExpr->getExprLoc(), TheDefaultStmt - ? diag::warn_def_missing_cases : diag::warn_missing_cases) - << (unsigned)UnhandledNames.size() - << UnhandledNames[0] << UnhandledNames[1] << UnhandledNames[2]; - break; + if (!UnhandledNames.empty()) { + DiagnosticBuilder DB = Diag(CondExpr->getExprLoc(), + TheDefaultStmt ? diag::warn_def_missing_case + : diag::warn_missing_case) + << (int)UnhandledNames.size(); + + for (size_t I = 0, E = std::min(UnhandledNames.size(), (size_t)3); + I != E; ++I) + DB << UnhandledNames[I]; } if (!hasCasesNotInSwitch) @@ -1195,30 +1184,37 @@ Sema::DiagnoseAssignmentEnum(QualType DstType, QualType SrcType, llvm::APSInt RhsVal = SrcExpr->EvaluateKnownConstInt(Context); AdjustAPSInt(RhsVal, DstWidth, DstIsSigned); const EnumDecl *ED = ET->getDecl(); - typedef SmallVector<std::pair<llvm::APSInt, EnumConstantDecl *>, 64> - EnumValsTy; - EnumValsTy EnumVals; - - // Gather all enum values, set their type and sort them, - // allowing easier comparison with rhs constant. - for (auto *EDI : ED->enumerators()) { - llvm::APSInt Val = EDI->getInitVal(); - AdjustAPSInt(Val, DstWidth, DstIsSigned); - EnumVals.push_back(std::make_pair(Val, EDI)); - } - if (EnumVals.empty()) - return; - std::stable_sort(EnumVals.begin(), EnumVals.end(), CmpEnumVals); - EnumValsTy::iterator EIend = - std::unique(EnumVals.begin(), EnumVals.end(), EqEnumVals); - - // See which values aren't in the enum. - EnumValsTy::const_iterator EI = EnumVals.begin(); - while (EI != EIend && EI->first < RhsVal) - EI++; - if (EI == EIend || EI->first != RhsVal) { - Diag(SrcExpr->getExprLoc(), diag::warn_not_in_enum_assignment) + + if (ED->hasAttr<FlagEnumAttr>()) { + if (!IsValueInFlagEnum(ED, RhsVal, true)) + Diag(SrcExpr->getExprLoc(), diag::warn_not_in_enum_assignment) << DstType.getUnqualifiedType(); + } else { + typedef SmallVector<std::pair<llvm::APSInt, EnumConstantDecl *>, 64> + EnumValsTy; + EnumValsTy EnumVals; + + // Gather all enum values, set their type and sort them, + // allowing easier comparison with rhs constant. + for (auto *EDI : ED->enumerators()) { + llvm::APSInt Val = EDI->getInitVal(); + AdjustAPSInt(Val, DstWidth, DstIsSigned); + EnumVals.push_back(std::make_pair(Val, EDI)); + } + if (EnumVals.empty()) + return; + std::stable_sort(EnumVals.begin(), EnumVals.end(), CmpEnumVals); + EnumValsTy::iterator EIend = + std::unique(EnumVals.begin(), EnumVals.end(), EqEnumVals); + + // See which values aren't in the enum. + EnumValsTy::const_iterator EI = EnumVals.begin(); + while (EI != EIend && EI->first < RhsVal) + EI++; + if (EI == EIend || EI->first != RhsVal) { + Diag(SrcExpr->getExprLoc(), diag::warn_not_in_enum_assignment) + << DstType.getUnqualifiedType(); + } } } } @@ -1832,6 +1828,15 @@ Sema::ActOnObjCForCollectionStmt(SourceLocation ForLoc, /// \return true if an error occurs. 
static bool FinishForRangeVarDecl(Sema &SemaRef, VarDecl *Decl, Expr *Init, SourceLocation Loc, int DiagID) { + if (Decl->getType()->isUndeducedType()) { + ExprResult Res = SemaRef.CorrectDelayedTyposInExpr(Init); + if (!Res.isUsable()) { + Decl->setInvalidDecl(); + return true; + } + Init = Res.get(); + } + // Deduce the type for the iterator variable now rather than leaving it to // AddInitializerToDecl, so we can produce a more suitable diagnostic. QualType InitType; @@ -2368,6 +2373,156 @@ StmtResult Sema::FinishObjCForCollectionStmt(Stmt *S, Stmt *B) { return S; } +// Warn when the loop variable is a const reference that creates a copy. +// Suggest using the non-reference type for copies. If a copy can be prevented +// suggest the const reference type that would do so. +// For instance, given "for (const Foo &x : Range)", suggest +// "for (const Foo x : Range)" to denote a copy is made for the loop. If +// possible, also suggest "for (const Bar &x : Range)" if this type prevents +// the copy altogether. +static void DiagnoseForRangeReferenceVariableCopies(Sema &SemaRef, + const VarDecl *VD, + QualType RangeInitType) { + const Expr *InitExpr = VD->getInit(); + if (!InitExpr) + return; + + QualType VariableType = VD->getType(); + + const MaterializeTemporaryExpr *MTE = + dyn_cast<MaterializeTemporaryExpr>(InitExpr); + + // No copy made. + if (!MTE) + return; + + const Expr *E = MTE->GetTemporaryExpr()->IgnoreImpCasts(); + + // Searching for either UnaryOperator for dereference of a pointer or + // CXXOperatorCallExpr for handling iterators. + while (!isa<CXXOperatorCallExpr>(E) && !isa<UnaryOperator>(E)) { + if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(E)) { + E = CCE->getArg(0); + } else if (const CXXMemberCallExpr *Call = dyn_cast<CXXMemberCallExpr>(E)) { + const MemberExpr *ME = cast<MemberExpr>(Call->getCallee()); + E = ME->getBase(); + } else { + const MaterializeTemporaryExpr *MTE = cast<MaterializeTemporaryExpr>(E); + E = MTE->GetTemporaryExpr(); + } + E = E->IgnoreImpCasts(); + } + + bool ReturnsReference = false; + if (isa<UnaryOperator>(E)) { + ReturnsReference = true; + } else { + const CXXOperatorCallExpr *Call = cast<CXXOperatorCallExpr>(E); + const FunctionDecl *FD = Call->getDirectCallee(); + QualType ReturnType = FD->getReturnType(); + ReturnsReference = ReturnType->isReferenceType(); + } + + if (ReturnsReference) { + // Loop variable creates a temporary. Suggest either to go with + // non-reference loop variable to indicate a copy is made, or + // the correct type to bind a const reference. + SemaRef.Diag(VD->getLocation(), diag::warn_for_range_const_reference_copy) + << VD << VariableType << E->getType(); + QualType NonReferenceType = VariableType.getNonReferenceType(); + NonReferenceType.removeLocalConst(); + QualType NewReferenceType = + SemaRef.Context.getLValueReferenceType(E->getType().withConst()); + SemaRef.Diag(VD->getLocStart(), diag::note_use_type_or_non_reference) + << NonReferenceType << NewReferenceType << VD->getSourceRange(); + } else { + // The range always returns a copy, so a temporary is always created. + // Suggest removing the reference from the loop variable.
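    // An illustrative case that reaches this branch:
    //   std::vector<bool> v;
    //   for (const bool &b : v) {}
    // The iterator's operator* returns a proxy by value, so 'b' binds to a
    // temporary copy and the note suggests plain 'const bool b'.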
+ SemaRef.Diag(VD->getLocation(), diag::warn_for_range_variable_always_copy) + << VD << RangeInitType; + QualType NonReferenceType = VariableType.getNonReferenceType(); + NonReferenceType.removeLocalConst(); + SemaRef.Diag(VD->getLocStart(), diag::note_use_non_reference_type) + << NonReferenceType << VD->getSourceRange(); + } +} + +// Warns when the loop variable can be changed to a reference type to +// prevent a copy. For instance, if given "for (const Foo x : Range)" suggest +// "for (const Foo &x : Range)" if this form does not make a copy. +static void DiagnoseForRangeConstVariableCopies(Sema &SemaRef, + const VarDecl *VD) { + const Expr *InitExpr = VD->getInit(); + if (!InitExpr) + return; + + QualType VariableType = VD->getType(); + + if (const CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(InitExpr)) { + if (!CE->getConstructor()->isCopyConstructor()) + return; + } else if (const CastExpr *CE = dyn_cast<CastExpr>(InitExpr)) { + if (CE->getCastKind() != CK_LValueToRValue) + return; + } else { + return; + } + + // TODO: Determine a maximum size that a POD type can be before a diagnostic + // should be emitted. Also, only ignore POD types with trivial copy + // constructors. + if (VariableType.isPODType(SemaRef.Context)) + return; + + // Suggest changing from a const variable to a const reference variable + // if doing so will prevent a copy. + SemaRef.Diag(VD->getLocation(), diag::warn_for_range_copy) + << VD << VariableType << InitExpr->getType(); + SemaRef.Diag(VD->getLocStart(), diag::note_use_reference_type) + << SemaRef.Context.getLValueReferenceType(VariableType) + << VD->getSourceRange(); +} + +/// DiagnoseForRangeVariableCopies - Diagnose three cases and suggest fixes for them. +/// 1) for (const foo &x : foos) where foos only returns a copy. Suggest +/// using "const foo x" to show that a copy is made +/// 2) for (const bar &x : foos) where bar is a temporary initialized from a foo. +/// Suggest either "const bar x" to keep the copying or "const foo& x" to +/// prevent the copy. +/// 3) for (const foo x : foos) where x is constructed from a reference foo. +/// Suggest "const foo &x" to prevent the copy. +static void DiagnoseForRangeVariableCopies(Sema &SemaRef, + const CXXForRangeStmt *ForStmt) { + if (SemaRef.Diags.isIgnored(diag::warn_for_range_const_reference_copy, + ForStmt->getLocStart()) && + SemaRef.Diags.isIgnored(diag::warn_for_range_variable_always_copy, + ForStmt->getLocStart()) && + SemaRef.Diags.isIgnored(diag::warn_for_range_copy, + ForStmt->getLocStart())) { + return; + } + + const VarDecl *VD = ForStmt->getLoopVariable(); + if (!VD) + return; + + QualType VariableType = VD->getType(); + + if (VariableType->isIncompleteType()) + return; + + const Expr *InitExpr = VD->getInit(); + if (!InitExpr) + return; + + if (VariableType->isReferenceType()) { + DiagnoseForRangeReferenceVariableCopies(SemaRef, VD, + ForStmt->getRangeInit()->getType()); + } else if (VariableType.isConstQualified()) { + DiagnoseForRangeConstVariableCopies(SemaRef, VD); + } +} + /// FinishCXXForRangeStmt - Attach the body to a C++0x for-range statement.
/// This is a separate step from ActOnCXXForRangeStmt because analysis of the /// body cannot be performed until after the type of the range variable is @@ -2385,6 +2540,8 @@ StmtResult Sema::FinishCXXForRangeStmt(Stmt *S, Stmt *B) { DiagnoseEmptyStmtBody(ForStmt->getRParenLoc(), B, diag::warn_empty_range_based_for_body); + DiagnoseForRangeVariableCopies(*this, ForStmt); + return S; } @@ -2423,6 +2580,14 @@ Sema::ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc, return new (Context) IndirectGotoStmt(GotoLoc, StarLoc, E); } +static void CheckJumpOutOfSEHFinally(Sema &S, SourceLocation Loc, + const Scope &DestScope) { + if (!S.CurrentSEHFinally.empty() && + DestScope.Contains(*S.CurrentSEHFinally.back())) { + S.Diag(Loc, diag::warn_jump_out_of_seh_finally); + } +} + StmtResult Sema::ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope) { Scope *S = CurScope->getContinueParent(); @@ -2430,6 +2595,7 @@ Sema::ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope) { // C99 6.8.6.2p1: A break shall appear only in or as a loop body. return StmtError(Diag(ContinueLoc, diag::err_continue_not_in_loop)); } + CheckJumpOutOfSEHFinally(*this, ContinueLoc, *S); return new (Context) ContinueStmt(ContinueLoc); } @@ -2444,6 +2610,7 @@ Sema::ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope) { if (S->isOpenMPLoopScope()) return StmtError(Diag(BreakLoc, diag::err_omp_loop_cannot_use_stmt) << "break"); + CheckJumpOutOfSEHFinally(*this, BreakLoc, *S); return new (Context) BreakStmt(BreakLoc); } @@ -2903,6 +3070,8 @@ Sema::ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, CurScope->setNoNRVO(); } + CheckJumpOutOfSEHFinally(*this, ReturnLoc, *CurScope->getFnParent()); + return R; } @@ -3179,7 +3348,7 @@ Sema::ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw, Diag(AtLoc, diag::err_objc_exceptions_disabled) << "@throw"; if (!Throw) { - // @throw without an expression designates a rethrow (which much occur + // @throw without an expression designates a rethrow (which must occur // in the context of an @catch clause). Scope *AtCatchParent = CurScope; while (AtCatchParent && !AtCatchParent->isAtCatchScope()) @@ -3251,35 +3420,112 @@ Sema::ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body) { } namespace { +class CatchHandlerType { + QualType QT; + unsigned IsPointer : 1; -class TypeWithHandler { - QualType t; - CXXCatchStmt *stmt; -public: - TypeWithHandler(const QualType &type, CXXCatchStmt *statement) - : t(type), stmt(statement) {} + // This is a special constructor to be used only with DenseMapInfo's + // getEmptyKey() and getTombstoneKey() functions. + friend struct llvm::DenseMapInfo<CatchHandlerType>; + enum Unique { ForDenseMap }; + CatchHandlerType(QualType QT, Unique) : QT(QT), IsPointer(false) {} - // An arbitrary order is fine as long as it places identical - // types next to each other. - bool operator<(const TypeWithHandler &y) const { - if (t.getAsOpaquePtr() < y.t.getAsOpaquePtr()) - return true; - if (t.getAsOpaquePtr() > y.t.getAsOpaquePtr()) +public: + /// Used when creating a CatchHandlerType from a handler type; will determine + /// whether the type is a pointer or reference and will strip off the top + /// level pointer and cv-qualifiers.
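+  /// For example (sketch): 'const Derived *' normalizes to
+  /// {QT = Derived, IsPointer = true} and 'Base &' to
+  /// {QT = Base, IsPointer = false}, so comparisons ignore top-level
+  /// cv-qualifiers while keeping the pointer/non-pointer distinction.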
+ CatchHandlerType(QualType Q) : QT(Q), IsPointer(false) { + if (QT->isPointerType()) + IsPointer = true; + + if (IsPointer || QT->isReferenceType()) + QT = QT->getPointeeType(); + QT = QT.getUnqualifiedType(); + } + + /// Used when creating a CatchHandlerType from a base class type; pretends the + /// type passed in had the pointer qualifier; it does not need to get an + /// unqualified type. + CatchHandlerType(QualType QT, bool IsPointer) + : QT(QT), IsPointer(IsPointer) {} + + QualType underlying() const { return QT; } + bool isPointer() const { return IsPointer; } + + friend bool operator==(const CatchHandlerType &LHS, + const CatchHandlerType &RHS) { + // If the pointer qualification does not match, we can return early. + if (LHS.IsPointer != RHS.IsPointer) return false; - else - return getTypeSpecStartLoc() < y.getTypeSpecStartLoc(); + // Otherwise, check the underlying type without cv-qualifiers. + return LHS.QT == RHS.QT; + } +}; +} // namespace + +namespace llvm { +template <> struct DenseMapInfo<CatchHandlerType> { + static CatchHandlerType getEmptyKey() { + return CatchHandlerType(DenseMapInfo<QualType>::getEmptyKey(), + CatchHandlerType::ForDenseMap); } - bool operator==(const TypeWithHandler& other) const { - return t == other.t; + static CatchHandlerType getTombstoneKey() { + return CatchHandlerType(DenseMapInfo<QualType>::getTombstoneKey(), + CatchHandlerType::ForDenseMap); } - CXXCatchStmt *getCatchStmt() const { return stmt; } - SourceLocation getTypeSpecStartLoc() const { - return stmt->getExceptionDecl()->getTypeSpecStartLoc(); + static unsigned getHashValue(const CatchHandlerType &Base) { + return DenseMapInfo<QualType>::getHashValue(Base.underlying()); } + + static bool isEqual(const CatchHandlerType &LHS, + const CatchHandlerType &RHS) { + return LHS == RHS; + } +}; + +// It's OK to treat CatchHandlerType as a POD type. +template <> struct isPodLike<CatchHandlerType> { + static const bool value = true; }; +} + +namespace { +class CatchTypePublicBases { + ASTContext &Ctx; + const llvm::DenseMap<CatchHandlerType, CXXCatchStmt *> &TypesToCheck; + const bool CheckAgainstPointer; + + CXXCatchStmt *FoundHandler; + CanQualType FoundHandlerType; +public: + CatchTypePublicBases( + ASTContext &Ctx, + const llvm::DenseMap<CatchHandlerType, CXXCatchStmt *> &T, bool C) + : Ctx(Ctx), TypesToCheck(T), CheckAgainstPointer(C), + FoundHandler(nullptr) {} + + CXXCatchStmt *getFoundHandler() const { return FoundHandler; } + CanQualType getFoundHandlerType() const { return FoundHandlerType; } + + static bool FindPublicBasesOfType(const CXXBaseSpecifier *S, CXXBasePath &, + void *User) { + auto &PBOT = *reinterpret_cast<CatchTypePublicBases *>(User); + if (S->getAccessSpecifier() == AccessSpecifier::AS_public) { + CatchHandlerType Check(S->getType(), PBOT.CheckAgainstPointer); + const auto &M = PBOT.TypesToCheck; // bind by reference; avoid copying the map + auto I = M.find(Check); + if (I != M.end()) { + PBOT.FoundHandler = I->second; + PBOT.FoundHandlerType = PBOT.Ctx.getCanonicalType(S->getType()); + return true; + } + } + return false; + } +}; } /// ActOnCXXTryBlock - Takes a try compound-statement and a number of @@ -3289,74 +3535,120 @@ StmtResult Sema::ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock, // Don't report an error if 'try' is used in system headers.
if (!getLangOpts().CXXExceptions && !getSourceManager().isInSystemHeader(TryLoc)) - Diag(TryLoc, diag::err_exceptions_disabled) << "try"; + Diag(TryLoc, diag::err_exceptions_disabled) << "try"; if (getCurScope() && getCurScope()->isOpenMPSimdDirectiveScope()) Diag(TryLoc, diag::err_omp_simd_region_cannot_use_stmt) << "try"; + sema::FunctionScopeInfo *FSI = getCurFunction(); + + // C++ try is incompatible with SEH __try. + if (!getLangOpts().Borland && FSI->FirstSEHTryLoc.isValid()) { + Diag(TryLoc, diag::err_mixing_cxx_try_seh_try); + Diag(FSI->FirstSEHTryLoc, diag::note_conflicting_try_here) << "'__try'"; + } + const unsigned NumHandlers = Handlers.size(); - assert(NumHandlers > 0 && + assert(!Handlers.empty() && "The parser shouldn't call this if there are no handlers."); - SmallVector<TypeWithHandler, 8> TypesWithHandlers; - + llvm::DenseMap<CatchHandlerType, CXXCatchStmt *> HandledTypes; for (unsigned i = 0; i < NumHandlers; ++i) { - CXXCatchStmt *Handler = cast<CXXCatchStmt>(Handlers[i]); - if (!Handler->getExceptionDecl()) { - if (i < NumHandlers - 1) - return StmtError(Diag(Handler->getLocStart(), - diag::err_early_catch_all)); + CXXCatchStmt *H = cast<CXXCatchStmt>(Handlers[i]); + // Diagnose when the handler is a catch-all handler, but it isn't the last + // handler for the try block. [except.handle]p5. Also, skip exception + // declarations that are invalid, since we can't usefully report on them. + if (!H->getExceptionDecl()) { + if (i < NumHandlers - 1) + return StmtError(Diag(H->getLocStart(), diag::err_early_catch_all)); + continue; + } else if (H->getExceptionDecl()->isInvalidDecl()) continue; - } - - const QualType CaughtType = Handler->getCaughtType(); - const QualType CanonicalCaughtType = Context.getCanonicalType(CaughtType); - TypesWithHandlers.push_back(TypeWithHandler(CanonicalCaughtType, Handler)); - } - - // Detect handlers for the same type as an earlier one. - if (NumHandlers > 1) { - llvm::array_pod_sort(TypesWithHandlers.begin(), TypesWithHandlers.end()); - - TypeWithHandler prev = TypesWithHandlers[0]; - for (unsigned i = 1; i < TypesWithHandlers.size(); ++i) { - TypeWithHandler curr = TypesWithHandlers[i]; - if (curr == prev) { - Diag(curr.getTypeSpecStartLoc(), - diag::warn_exception_caught_by_earlier_handler) - << curr.getCatchStmt()->getCaughtType().getAsString(); - Diag(prev.getTypeSpecStartLoc(), - diag::note_previous_exception_handler) - << prev.getCatchStmt()->getCaughtType().getAsString(); + // Walk the type hierarchy to diagnose when this type has already been + // handled (duplication), or cannot be handled (derivation inversion). We + // ignore top-level cv-qualifiers, per [except.handle]p3 + CatchHandlerType HandlerCHT = + (QualType)Context.getCanonicalType(H->getCaughtType()); + + // We can ignore whether the type is a reference or a pointer; we need the + // underlying declaration type in order to get at the underlying record + // decl, if there is one. + QualType Underlying = HandlerCHT.underlying(); + if (auto *RD = Underlying->getAsCXXRecordDecl()) { + if (!RD->hasDefinition()) + continue; + // Check that none of the public, unambiguous base classes are in the + // map ([except.handle]p1). Give the base classes the same pointer + // qualification as the original type we are basing off of. This allows + // comparison against the handler type using the same top-level pointer + // as the original type. 
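+      // Sketch of the inversion this detects:
+      //   try { ... } catch (Base *B) { ... } catch (Derived *D) { ... }
+      // The Derived handler can never be entered, because its public base
+      // class Base is caught by the earlier handler.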
+ CXXBasePaths Paths; + Paths.setOrigin(RD); + CatchTypePublicBases CTPB(Context, HandledTypes, HandlerCHT.isPointer()); + if (RD->lookupInBases(CatchTypePublicBases::FindPublicBasesOfType, &CTPB, + Paths)) { + const CXXCatchStmt *Problem = CTPB.getFoundHandler(); + if (!Paths.isAmbiguous(CTPB.getFoundHandlerType())) { + Diag(H->getExceptionDecl()->getTypeSpecStartLoc(), + diag::warn_exception_caught_by_earlier_handler) + << H->getCaughtType(); + Diag(Problem->getExceptionDecl()->getTypeSpecStartLoc(), + diag::note_previous_exception_handler) + << Problem->getCaughtType(); + } } + } - prev = curr; + // Add the type to the list of ones we have handled; diagnose if we've + // already handled it. + auto R = HandledTypes.insert(std::make_pair(H->getCaughtType(), H)); + if (!R.second) { + const CXXCatchStmt *Problem = R.first->second; + Diag(H->getExceptionDecl()->getTypeSpecStartLoc(), + diag::warn_exception_caught_by_earlier_handler) + << H->getCaughtType(); + Diag(Problem->getExceptionDecl()->getTypeSpecStartLoc(), + diag::note_previous_exception_handler) + << Problem->getCaughtType(); + } } - getCurFunction()->setHasBranchProtectedScope(); - - // FIXME: We should detect handlers that cannot catch anything because an - // earlier handler catches a superclass. Need to find a method that is not - // quadratic for this. - // Neither of these are explicitly forbidden, but every compiler detects them - // and warns. + FSI->setHasCXXTry(TryLoc); return CXXTryStmt::Create(Context, TryLoc, TryBlock, Handlers); } -StmtResult -Sema::ActOnSEHTryBlock(bool IsCXXTry, - SourceLocation TryLoc, - Stmt *TryBlock, - Stmt *Handler) { +StmtResult Sema::ActOnSEHTryBlock(bool IsCXXTry, SourceLocation TryLoc, + Stmt *TryBlock, Stmt *Handler) { assert(TryBlock && Handler); - getCurFunction()->setHasBranchProtectedScope(); + sema::FunctionScopeInfo *FSI = getCurFunction(); + + // SEH __try is incompatible with C++ try. Borland appears to support this, + // however. + if (!getLangOpts().Borland) { + if (FSI->FirstCXXTryLoc.isValid()) { + Diag(TryLoc, diag::err_mixing_cxx_try_seh_try); + Diag(FSI->FirstCXXTryLoc, diag::note_conflicting_try_here) << "'try'"; + } + } + + FSI->setHasSEHTry(TryLoc); - return SEHTryStmt::Create(Context,IsCXXTry,TryLoc,TryBlock,Handler); + // Reject __try in Obj-C methods, blocks, and captured decls, since we don't + // track if they use SEH.
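+  // e.g. (sketch): a __try written inside an Objective-C method or a block
+  // finds no FunctionDecl in the walk below and is rejected with
+  // err_seh_try_outside_functions instead of being silently mis-tracked.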
+ DeclContext *DC = CurContext; + while (DC && !DC->isFunctionOrMethod()) + DC = DC->getParent(); + FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(DC); + if (FD) + FD->setUsesSEHTry(true); + else + Diag(TryLoc, diag::err_seh_try_outside_functions); + + return SEHTryStmt::Create(Context, IsCXXTry, TryLoc, TryBlock, Handler); } StmtResult @@ -3374,11 +3666,18 @@ Sema::ActOnSEHExceptBlock(SourceLocation Loc, return SEHExceptStmt::Create(Context,Loc,FilterExpr,Block); } -StmtResult -Sema::ActOnSEHFinallyBlock(SourceLocation Loc, - Stmt *Block) { +void Sema::ActOnStartSEHFinallyBlock() { + CurrentSEHFinally.push_back(CurScope); +} + +void Sema::ActOnAbortSEHFinallyBlock() { + CurrentSEHFinally.pop_back(); +} + +StmtResult Sema::ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block) { assert(Block); - return SEHFinallyStmt::Create(Context,Loc,Block); + CurrentSEHFinally.pop_back(); + return SEHFinallyStmt::Create(Context, Loc, Block); } StmtResult @@ -3388,6 +3687,7 @@ Sema::ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope) { SEHTryParent = SEHTryParent->getParent(); if (!SEHTryParent) return StmtError(Diag(Loc, diag::err_ms___leave_not_in___try)); + CheckJumpOutOfSEHFinally(*this, Loc, *SEHTryParent); return new (Context) SEHLeaveStmt(Loc); } diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaStmtAsm.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaStmtAsm.cpp index 0d32581..9f48616 100644 --- a/contrib/llvm/tools/clang/lib/Sema/SemaStmtAsm.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/SemaStmtAsm.cpp @@ -12,6 +12,7 @@ //===----------------------------------------------------------------------===// #include "clang/Sema/SemaInternal.h" +#include "clang/AST/ExprCXX.h" #include "clang/AST/RecordLayout.h" #include "clang/AST/TypeLoc.h" #include "clang/Basic/TargetInfo.h" @@ -86,6 +87,11 @@ static bool CheckNakedParmReference(Expr *E, Sema &S) { WorkList.push_back(E); while (WorkList.size()) { Expr *E = WorkList.pop_back_val(); + if (isa<CXXThisExpr>(E)) { + S.Diag(E->getLocStart(), diag::err_asm_naked_this_ref); + S.Diag(Func->getAttr<NakedAttr>()->getLocation(), diag::note_attribute); + return true; + } if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) { if (isa<ParmVarDecl>(DRE->getDecl())) { S.Diag(DRE->getLocStart(), diag::err_asm_naked_parm_ref); @@ -118,6 +124,9 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple, // The parser verifies that there is a string literal here. 
assert(AsmString->isAscii()); + bool ValidateConstraints = + DeclAttrsMatchCUDAMode(getLangOpts(), getCurFunctionDecl()); + for (unsigned i = 0; i != NumOutputs; i++) { StringLiteral *Literal = Constraints[i]; assert(Literal->isAscii()); @@ -127,7 +136,8 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple, OutputName = Names[i]->getName(); TargetInfo::ConstraintInfo Info(Literal->getString(), OutputName); - if (!Context.getTargetInfo().validateOutputConstraint(Info)) + if (ValidateConstraints && + !Context.getTargetInfo().validateOutputConstraint(Info)) return StmtError(Diag(Literal->getLocStart(), diag::err_asm_invalid_output_constraint) << Info.getConstraintStr()); @@ -201,8 +211,9 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple, InputName = Names[i]->getName(); TargetInfo::ConstraintInfo Info(Literal->getString(), InputName); - if (!Context.getTargetInfo().validateInputConstraint(OutputConstraintInfos.data(), - NumOutputs, Info)) { + if (ValidateConstraints && + !Context.getTargetInfo().validateInputConstraint( + OutputConstraintInfos.data(), NumOutputs, Info)) { return StmtError(Diag(Literal->getLocStart(), diag::err_asm_invalid_input_constraint) << Info.getConstraintStr()); @@ -307,32 +318,22 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple, if (!Piece.isOperand()) continue; // Look for the correct constraint index. - unsigned Idx = 0; - unsigned ConstraintIdx = 0; - for (unsigned i = 0, e = NS->getNumOutputs(); i != e; ++i, ++ConstraintIdx) { - TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i]; - if (Idx == Piece.getOperandNo()) - break; - ++Idx; - - if (Info.isReadWrite()) { - if (Idx == Piece.getOperandNo()) - break; - ++Idx; - } - } + unsigned ConstraintIdx = Piece.getOperandNo(); + unsigned NumOperands = NS->getNumOutputs() + NS->getNumInputs(); - for (unsigned i = 0, e = NS->getNumInputs(); i != e; ++i, ++ConstraintIdx) { - TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i]; - if (Idx == Piece.getOperandNo()) - break; - ++Idx; + // Look for the (ConstraintIdx - NumOperands + 1)th constraint with + // modifier '+'. + if (ConstraintIdx >= NumOperands) { + unsigned I = 0, E = NS->getNumOutputs(); - if (Info.isReadWrite()) { - if (Idx == Piece.getOperandNo()) + for (unsigned Cnt = ConstraintIdx - NumOperands; I != E; ++I) + if (OutputConstraintInfos[I].isReadWrite() && Cnt-- == 0) { + ConstraintIdx = I; break; - ++Idx; - } + } + + assert(I != E && "Invalid operand number should have been caught in " + " AnalyzeAsmString"); } // Now that we have the right indexes go ahead and check. diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaTemplate.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaTemplate.cpp index 1e71762..37eeee2 100644 --- a/contrib/llvm/tools/clang/lib/Sema/SemaTemplate.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/SemaTemplate.cpp @@ -836,7 +836,8 @@ Sema::CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK, AccessSpecifier AS, SourceLocation ModulePrivateLoc, SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists, - TemplateParameterList** OuterTemplateParamLists) { + TemplateParameterList** OuterTemplateParamLists, + SkipBodyInfo *SkipBody) { assert(TemplateParams && TemplateParams->size() > 0 && "No template parameters"); assert(TUK != TUK_Reference && "Can only declare or define class templates"); @@ -993,6 +994,19 @@ Sema::CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK, // Check for redefinition of this class template. 
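  // (Sketch of the modules case handled below: if a prior definition of the
  // class template exists but is not visible, e.g. it lives in a module that
  // has been loaded but not yet made visible here, the new definition is
  // merged with it and its body skipped, rather than diagnosed as a
  // redefinition.)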
if (TUK == TUK_Definition) { if (TagDecl *Def = PrevRecordDecl->getDefinition()) { + // If we have a prior definition that is not visible, treat this as + // simply making that previous definition visible. + NamedDecl *Hidden = nullptr; + if (SkipBody && !hasVisibleDefinition(Def, &Hidden)) { + SkipBody->ShouldSkip = true; + auto *Tmpl = cast<CXXRecordDecl>(Hidden)->getDescribedClassTemplate(); + assert(Tmpl && "original definition of a class template is not a " + "class template?"); + makeMergedDefinitionVisible(Hidden, KWLoc); + makeMergedDefinitionVisible(Tmpl, KWLoc); + return Def; + } + Diag(NameLoc, diag::err_redefinition) << Name; Diag(Def->getLocation(), diag::note_previous_definition); // FIXME: Would it make sense to try to "forget" the previous @@ -1296,6 +1310,9 @@ bool Sema::CheckTemplateParameterList(TemplateParameterList *NewParams, // Merge default arguments for template type parameters. TemplateTypeParmDecl *OldTypeParm = OldParams? cast<TemplateTypeParmDecl>(*OldParam) : nullptr; + // FIXME: There might be a visible declaration of this template parameter. + if (OldTypeParm && !LookupResult::isVisible(*this, OldTypeParm)) + OldTypeParm = nullptr; if (NewTypeParm->isParameterPack()) { assert(!NewTypeParm->hasDefaultArgument() && @@ -1341,6 +1358,8 @@ bool Sema::CheckTemplateParameterList(TemplateParameterList *NewParams, // Merge default arguments for non-type template parameters NonTypeTemplateParmDecl *OldNonTypeParm = OldParams? cast<NonTypeTemplateParmDecl>(*OldParam) : nullptr; + if (OldNonTypeParm && !LookupResult::isVisible(*this, OldNonTypeParm)) + OldNonTypeParm = nullptr; if (NewNonTypeParm->isParameterPack()) { assert(!NewNonTypeParm->hasDefaultArgument() && "Parameter packs can't have a default argument!"); @@ -1388,6 +1407,8 @@ bool Sema::CheckTemplateParameterList(TemplateParameterList *NewParams, // Merge default arguments for template template parameters TemplateTemplateParmDecl *OldTemplateParm = OldParams? 
cast<TemplateTemplateParmDecl>(*OldParam) : nullptr; + if (OldTemplateParm && !LookupResult::isVisible(*this, OldTemplateParm)) + OldTemplateParm = nullptr; if (NewTemplateParm->isParameterPack()) { assert(!NewTemplateParm->hasDefaultArgument() && "Parameter packs can't have a default argument!"); @@ -1798,7 +1819,7 @@ TemplateParameterList *Sema::MatchTemplateParametersToScopeSpecifier( } } else if (const TemplateSpecializationType *TST = T->getAs<TemplateSpecializationType>()) { - if (TemplateDecl *Template = TST->getTemplateName().getAsTemplateDecl()) { + if (TemplateDecl *Template = TST->getTemplateName().getAsTemplateDecl()) { ExpectedTemplateParams = Template->getTemplateParameters(); NeedNonemptyTemplateHeader = true; } @@ -5835,11 +5856,13 @@ static bool CheckTemplateSpecializationScope(Sema &S, if (isa<TranslationUnitDecl>(SpecializedContext)) S.Diag(Loc, diag::err_template_spec_redecl_global_scope) << EntityKind << Specialized; - else if (isa<NamespaceDecl>(SpecializedContext)) - S.Diag(Loc, diag::err_template_spec_redecl_out_of_scope) - << EntityKind << Specialized - << cast<NamedDecl>(SpecializedContext); - else + else if (isa<NamespaceDecl>(SpecializedContext)) { + int Diag = diag::err_template_spec_redecl_out_of_scope; + if (S.getLangOpts().MicrosoftExt) + Diag = diag::ext_ms_template_spec_redecl_out_of_scope; + S.Diag(Loc, Diag) << EntityKind << Specialized + << cast<NamedDecl>(SpecializedContext); + } else llvm_unreachable("unexpected namespace context for specialization"); S.Diag(Specialized->getLocation(), diag::note_specialized_entity); @@ -6036,7 +6059,9 @@ Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec, SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId, AttributeList *Attr, - MultiTemplateParamsArg TemplateParameterLists) { + MultiTemplateParamsArg + TemplateParameterLists, + SkipBodyInfo *SkipBody) { assert(TUK != TUK_Reference && "References are not specializations"); CXXScopeSpec &SS = TemplateId.SS; @@ -6347,7 +6372,14 @@ Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec, // Check that this isn't a redefinition of this specialization. if (TUK == TUK_Definition) { - if (RecordDecl *Def = Specialization->getDefinition()) { + RecordDecl *Def = Specialization->getDefinition(); + NamedDecl *Hidden = nullptr; + if (Def && SkipBody && !hasVisibleDefinition(Def, &Hidden)) { + SkipBody->ShouldSkip = true; + makeMergedDefinitionVisible(Hidden, KWLoc); + // From here on out, treat this as just a redeclaration. + TUK = TUK_Declaration; + } else if (Def) { SourceRange Range(TemplateNameLoc, RAngleLoc); Diag(TemplateNameLoc, diag::err_redefinition) << Context.getTypeDeclType(Specialization) << Range; @@ -7177,9 +7209,27 @@ Sema::ActOnExplicitInstantiation(Scope *S, // There are two forms of explicit instantiation: an explicit instantiation // definition and an explicit instantiation declaration. An explicit // instantiation declaration begins with the extern keyword. [...] - TemplateSpecializationKind TSK - = ExternLoc.isInvalid()? TSK_ExplicitInstantiationDefinition - : TSK_ExplicitInstantiationDeclaration; + TemplateSpecializationKind TSK = ExternLoc.isInvalid() + ? TSK_ExplicitInstantiationDefinition + : TSK_ExplicitInstantiationDeclaration; + + if (TSK == TSK_ExplicitInstantiationDeclaration) { + // Check for dllexport class template instantiation declarations. 
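+    // e.g. (sketch): 'extern template struct __declspec(dllexport) S<int>;'
+    // warns here, since dllexport has no effect on an explicit instantiation
+    // declaration; only the instantiation definition exports the members.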
+ for (AttributeList *A = Attr; A; A = A->getNext()) { + if (A->getKind() == AttributeList::AT_DLLExport) { + Diag(ExternLoc, + diag::warn_attribute_dllexport_explicit_instantiation_decl); + Diag(A->getLoc(), diag::note_attribute); + break; + } + } + + if (auto *A = ClassTemplate->getTemplatedDecl()->getAttr<DLLExportAttr>()) { + Diag(ExternLoc, + diag::warn_attribute_dllexport_explicit_instantiation_decl); + Diag(A->getLocation(), diag::note_attribute); + } + } // Translate the parser's template argument list in our AST format. TemplateArgumentListInfo TemplateArgs(LAngleLoc, RAngleLoc); @@ -7315,10 +7365,19 @@ Sema::ActOnExplicitInstantiation(Scope *S, // Fix a TSK_ExplicitInstantiationDeclaration followed by a // TSK_ExplicitInstantiationDefinition if (Old_TSK == TSK_ExplicitInstantiationDeclaration && - TSK == TSK_ExplicitInstantiationDefinition) + TSK == TSK_ExplicitInstantiationDefinition) { // FIXME: Need to notify the ASTMutationListener that we did this. Def->setTemplateSpecializationKind(TSK); + if (!getDLLAttr(Def) && getDLLAttr(Specialization)) { + auto *A = cast<InheritableAttr>( + getDLLAttr(Specialization)->clone(getASTContext())); + A->setInherited(true); + Def->addAttr(A); + checkClassLevelDLLAttribute(Def); + } + } + InstantiateClassTemplateSpecializationMembers(TemplateNameLoc, Def, TSK); } @@ -8285,7 +8344,7 @@ void Sema::MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD, // Take tokens to avoid allocations LPT->Toks.swap(Toks); LPT->D = FnD; - LateParsedTemplateMap[FD] = LPT; + LateParsedTemplateMap.insert(std::make_pair(FD, LPT)); FD->setLateTemplateParsed(true); } diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaTemplateDeduction.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateDeduction.cpp index dd2a4d2..6f676ad 100644 --- a/contrib/llvm/tools/clang/lib/Sema/SemaTemplateDeduction.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateDeduction.cpp @@ -91,30 +91,6 @@ DeduceTemplateArguments(Sema &S, TemplateDeductionInfo &Info, SmallVectorImpl<DeducedTemplateArgument> &Deduced); -/// \brief Whether template argument deduction for two reference parameters -/// resulted in the argument type, parameter type, or neither type being more -/// qualified than the other. -enum DeductionQualifierComparison { - NeitherMoreQualified = 0, - ParamMoreQualified, - ArgMoreQualified -}; - -/// \brief Stores the result of comparing two reference parameters while -/// performing template argument deduction for partial ordering of function -/// templates. -struct RefParamPartialOrderingComparison { - /// \brief Whether the parameter type is an rvalue reference type. - bool ParamIsRvalueRef; - /// \brief Whether the argument type is an rvalue reference type. - bool ArgIsRvalueRef; - - /// \brief Whether the parameter or argument (or neither) is more qualified. - DeductionQualifierComparison Qualifiers; -}; - - - static Sema::TemplateDeductionResult DeduceTemplateArgumentsByTypeMatch(Sema &S, TemplateParameterList *TemplateParams, @@ -124,9 +100,7 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S, SmallVectorImpl<DeducedTemplateArgument> & Deduced, unsigned TDF, - bool PartialOrdering = false, - SmallVectorImpl<RefParamPartialOrderingComparison> * - RefParamComparisons = nullptr); + bool PartialOrdering = false); static Sema::TemplateDeductionResult DeduceTemplateArguments(Sema &S, @@ -602,6 +576,7 @@ struct clang::DeducedPack { DeducedPack *Outer; }; +namespace { /// A scope in which we're performing pack deduction. 
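 /// For instance (sketch), deducing Ts for a call 'f(1, 2.0)' against
 /// 'template <class... Ts> void f(Ts... ts)' opens one such scope around the
 /// expansion of 'ts'.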
class PackDeductionScope { public: @@ -756,6 +731,7 @@ private: SmallVector<DeducedPack, 2> Packs; }; +} // namespace /// \brief Deduce the template arguments by comparing the list of parameter /// types to the list of argument types, as in the parameter-type-lists of @@ -784,9 +760,6 @@ private: /// deduction for during partial ordering for a call /// (C++0x [temp.deduct.partial]). /// -/// \param RefParamComparisons If we're performing template argument deduction -/// in the context of partial ordering, the set of qualifier comparisons. -/// /// \returns the result of template argument deduction so far. Note that a /// "success" result means that template argument deduction has not yet failed, /// but it may still fail, later, for other reasons. @@ -798,9 +771,7 @@ DeduceTemplateArguments(Sema &S, TemplateDeductionInfo &Info, SmallVectorImpl<DeducedTemplateArgument> &Deduced, unsigned TDF, - bool PartialOrdering = false, - SmallVectorImpl<RefParamPartialOrderingComparison> * - RefParamComparisons = nullptr) { + bool PartialOrdering = false) { // Fast-path check to see if we have too many/too few arguments. if (NumParams != NumArgs && !(NumParams && isa<PackExpansionType>(Params[NumParams - 1])) && @@ -836,8 +807,7 @@ DeduceTemplateArguments(Sema &S, = DeduceTemplateArgumentsByTypeMatch(S, TemplateParams, Params[ParamIdx], Args[ArgIdx], Info, Deduced, TDF, - PartialOrdering, - RefParamComparisons)) + PartialOrdering)) return Result; ++ArgIdx; @@ -869,8 +839,7 @@ DeduceTemplateArguments(Sema &S, if (Sema::TemplateDeductionResult Result = DeduceTemplateArgumentsByTypeMatch(S, TemplateParams, Pattern, Args[ArgIdx], Info, Deduced, - TDF, PartialOrdering, - RefParamComparisons)) + TDF, PartialOrdering)) return Result; PackScope.nextPackElement(); @@ -967,9 +936,6 @@ bool Sema::isSameOrCompatibleFunctionType(CanQualType Param, /// \param PartialOrdering Whether we're performing template argument deduction /// in the context of partial ordering (C++0x [temp.deduct.partial]). /// -/// \param RefParamComparisons If we're performing template argument deduction -/// in the context of partial ordering, the set of qualifier comparisons. -/// /// \returns the result of template argument deduction so far. Note that a /// "success" result means that template argument deduction has not yet failed, /// but it may still fail, later, for other reasons. @@ -980,9 +946,7 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S, TemplateDeductionInfo &Info, SmallVectorImpl<DeducedTemplateArgument> &Deduced, unsigned TDF, - bool PartialOrdering, - SmallVectorImpl<RefParamPartialOrderingComparison> * - RefParamComparisons) { + bool PartialOrdering) { // We only want to look at the canonical types, since typedefs and // sugar are not part of template argument deduction. QualType Param = S.Context.getCanonicalType(ParamIn); @@ -995,7 +959,7 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S, Arg = ArgExpansion->getPattern(); if (PartialOrdering) { - // C++0x [temp.deduct.partial]p5: + // C++11 [temp.deduct.partial]p5: // Before the partial ordering is done, certain transformations are // performed on the types used for partial ordering: // - If P is a reference type, P is replaced by the type referred to. 
@@ -1008,42 +972,42 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S, if (ArgRef) Arg = ArgRef->getPointeeType(); - if (RefParamComparisons && ParamRef && ArgRef) { - // C++0x [temp.deduct.partial]p6: - // If both P and A were reference types (before being replaced with the - // type referred to above), determine which of the two types (if any) is - // more cv-qualified than the other; otherwise the types are considered - // to be equally cv-qualified for partial ordering purposes. The result - // of this determination will be used below. + if (ParamRef && ArgRef && S.Context.hasSameUnqualifiedType(Param, Arg)) { + // C++11 [temp.deduct.partial]p9: + // If, for a given type, deduction succeeds in both directions (i.e., + // the types are identical after the transformations above) and both + // P and A were reference types [...]: + // - if [one type] was an lvalue reference and [the other type] was + // not, [the other type] is not considered to be at least as + // specialized as [the first type] + // - if [one type] is more cv-qualified than [the other type], + // [the other type] is not considered to be at least as specialized + // as [the first type] + // Objective-C ARC adds: + // - [one type] has non-trivial lifetime, [the other type] has + // __unsafe_unretained lifetime, and the types are otherwise + // identical // - // We save this information for later, using it only when deduction - // succeeds in both directions. - RefParamPartialOrderingComparison Comparison; - Comparison.ParamIsRvalueRef = ParamRef->getAs<RValueReferenceType>(); - Comparison.ArgIsRvalueRef = ArgRef->getAs<RValueReferenceType>(); - Comparison.Qualifiers = NeitherMoreQualified; - + // A is "considered to be at least as specialized" as P iff deduction + // succeeds, so we model this as a deduction failure. Note that + // [the first type] is P and [the other type] is A here; the standard + // gets this backwards. Qualifiers ParamQuals = Param.getQualifiers(); Qualifiers ArgQuals = Arg.getQualifiers(); - if (ParamQuals.isStrictSupersetOf(ArgQuals)) - Comparison.Qualifiers = ParamMoreQualified; - else if (ArgQuals.isStrictSupersetOf(ParamQuals)) - Comparison.Qualifiers = ArgMoreQualified; - else if (ArgQuals.getObjCLifetime() != ParamQuals.getObjCLifetime() && - ArgQuals.withoutObjCLifetime() - == ParamQuals.withoutObjCLifetime()) { - // Prefer binding to non-__unsafe_autoretained parameters. - if (ArgQuals.getObjCLifetime() == Qualifiers::OCL_ExplicitNone && - ParamQuals.getObjCLifetime()) - Comparison.Qualifiers = ParamMoreQualified; - else if (ParamQuals.getObjCLifetime() == Qualifiers::OCL_ExplicitNone && - ArgQuals.getObjCLifetime()) - Comparison.Qualifiers = ArgMoreQualified; + if ((ParamRef->isLValueReferenceType() && + !ArgRef->isLValueReferenceType()) || + ParamQuals.isStrictSupersetOf(ArgQuals) || + (ParamQuals.hasNonTrivialObjCLifetime() && + ArgQuals.getObjCLifetime() == Qualifiers::OCL_ExplicitNone && + ParamQuals.withoutObjCLifetime() == + ArgQuals.withoutObjCLifetime())) { + Info.FirstArg = TemplateArgument(ParamIn); + Info.SecondArg = TemplateArgument(ArgIn); + return Sema::TDK_NonDeducedMismatch; } - RefParamComparisons->push_back(Comparison); } - // C++0x [temp.deduct.partial]p7: + // C++11 [temp.deduct.partial]p7: // Remove any top-level cv-qualifiers: // - If P is a cv-qualified type, P is replaced by the cv-unqualified // version of P. 
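The effect of folding [temp.deduct.partial]p9 directly into deduction is easiest to see on a small overload pair. A stand-alone sketch (illustrative names, not taken from this change) showing the lvalue-reference bullet selecting the T& overload:

template <class T> int pick(T &)  { return 1; } // #1: lvalue-reference parameter
template <class T> int pick(T &&) { return 2; } // #2: rvalue-reference parameter

int main() {
  int n = 0;
  // Both deductions yield an exact match for 'pick(n)', so partial ordering
  // decides: #2's parameter is not an lvalue reference, so #2 is not at
  // least as specialized as #1, and #1 is chosen (returns 1).
  return pick(n);
}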
@@ -2784,7 +2748,8 @@ Sema::FinishTemplateArgumentDeduction(FunctionTemplateDecl *FunctionTemplate, unsigned NumExplicitlySpecified, FunctionDecl *&Specialization, TemplateDeductionInfo &Info, - SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs) { + SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs, + bool PartialOverloading) { TemplateParameterList *TemplateParams = FunctionTemplate->getTemplateParameters(); @@ -2911,6 +2876,8 @@ Sema::FinishTemplateArgumentDeduction(FunctionTemplateDecl *FunctionTemplate, const_cast<NamedDecl *>(TemplateParams->getParam(I))); Info.reset(TemplateArgumentList::CreateCopy(Context, Builder.data(), Builder.size())); + if (PartialOverloading) break; + return HasDefaultArg ? TDK_SubstitutionFailure : TDK_Incomplete; } @@ -3136,34 +3103,16 @@ static bool AdjustFunctionParmAndArgTypesForDeduction(Sema &S, // are ignored for type deduction. if (ParamType.hasQualifiers()) ParamType = ParamType.getUnqualifiedType(); - const ReferenceType *ParamRefType = ParamType->getAs<ReferenceType>(); - if (ParamRefType) { - QualType PointeeType = ParamRefType->getPointeeType(); - // If the argument has incomplete array type, try to complete its type. - if (ArgType->isIncompleteArrayType() && !S.RequireCompleteExprType(Arg, 0)) - ArgType = Arg->getType(); - - // [C++0x] If P is an rvalue reference to a cv-unqualified - // template parameter and the argument is an lvalue, the type - // "lvalue reference to A" is used in place of A for type - // deduction. - if (isa<RValueReferenceType>(ParamType)) { - if (!PointeeType.getQualifiers() && - isa<TemplateTypeParmType>(PointeeType) && - Arg->Classify(S.Context).isLValue() && - Arg->getType() != S.Context.OverloadTy && - Arg->getType() != S.Context.BoundMemberTy) - ArgType = S.Context.getLValueReferenceType(ArgType); - } - - // [...] If P is a reference type, the type referred to by P is used - // for type deduction. - ParamType = PointeeType; - } + // [...] If P is a reference type, the type referred to by P is + // used for type deduction. + const ReferenceType *ParamRefType = ParamType->getAs<ReferenceType>(); + if (ParamRefType) + ParamType = ParamRefType->getPointeeType(); - // Overload sets usually make this parameter an undeduced - // context, but there are sometimes special circumstances. + // Overload sets usually make this parameter an undeduced context, + // but there are sometimes special circumstances, typically + // involving a template-id-expr. if (ArgType == S.Context.OverloadTy) { ArgType = ResolveOverloadForDeduction(S, TemplateParams, Arg, ParamType, @@ -3173,12 +3122,17 @@ static bool AdjustFunctionParmAndArgTypesForDeduction(Sema &S, } if (ParamRefType) { + // If the argument has incomplete array type, try to complete its type. + if (ArgType->isIncompleteArrayType() && !S.RequireCompleteExprType(Arg, 0)) + ArgType = Arg->getType(); + + // C++0x [temp.deduct.call]p3: - // [...] If P is of the form T&&, where T is a template parameter, and - // the argument is an lvalue, the type A& is used in place of A for - // type deduction. + // If P is an rvalue reference to a cv-unqualified template + // parameter and the argument is an lvalue, the type "lvalue + // reference to A" is used in place of A for type deduction.
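+      // e.g. (sketch): for 'template <class T> void g(T &&); int i; g(i);'
+      // the argument is an lvalue, so A becomes 'int &' and T deduces to
+      // 'int &' (the forwarding-reference case).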
if (ParamRefType->isRValueReferenceType() && - ParamRefType->getAs<TemplateTypeParmType>() && + !ParamType.getQualifiers() && + isa<TemplateTypeParmType>(ParamType) && Arg->isLValue()) ArgType = S.Context.getLValueReferenceType(ArgType); } else { @@ -3298,26 +3252,28 @@ DeduceTemplateArgumentByListElement(Sema &S, Sema::TemplateDeductionResult Sema::DeduceTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, - FunctionDecl *&Specialization, TemplateDeductionInfo &Info) { + FunctionDecl *&Specialization, TemplateDeductionInfo &Info, + bool PartialOverloading) { if (FunctionTemplate->isInvalidDecl()) return TDK_Invalid; FunctionDecl *Function = FunctionTemplate->getTemplatedDecl(); + unsigned NumParams = Function->getNumParams(); // C++ [temp.deduct.call]p1: // Template argument deduction is done by comparing each function template // parameter type (call it P) with the type of the corresponding argument // of the call (call it A) as described below. unsigned CheckArgs = Args.size(); - if (Args.size() < Function->getMinRequiredArguments()) + if (Args.size() < Function->getMinRequiredArguments() && !PartialOverloading) return TDK_TooFewArguments; - else if (Args.size() > Function->getNumParams()) { + else if (TooManyArguments(NumParams, Args.size(), PartialOverloading)) { const FunctionProtoType *Proto = Function->getType()->getAs<FunctionProtoType>(); if (Proto->isTemplateVariadic()) /* Do nothing */; else if (Proto->isVariadic()) - CheckArgs = Function->getNumParams(); + CheckArgs = NumParams; else return TDK_TooManyArguments; } @@ -3344,7 +3300,7 @@ Sema::TemplateDeductionResult Sema::DeduceTemplateArguments( NumExplicitlySpecified = Deduced.size(); } else { // Just fill in the parameter types from the function declaration. - for (unsigned I = 0, N = Function->getNumParams(); I != N; ++I) + for (unsigned I = 0; I != NumParams; ++I) ParamTypes.push_back(Function->getParamDecl(I)->getType()); } @@ -3352,8 +3308,8 @@ Sema::TemplateDeductionResult Sema::DeduceTemplateArguments( Deduced.resize(TemplateParams->size()); unsigned ArgIdx = 0; SmallVector<OriginalCallArg, 4> OriginalCallArgs; - for (unsigned ParamIdx = 0, NumParams = ParamTypes.size(); - ParamIdx != NumParams; ++ParamIdx) { + for (unsigned ParamIdx = 0, NumParamTypes = ParamTypes.size(); + ParamIdx != NumParamTypes; ++ParamIdx) { QualType OrigParamType = ParamTypes[ParamIdx]; QualType ParamType = OrigParamType; @@ -3422,7 +3378,7 @@ Sema::TemplateDeductionResult Sema::DeduceTemplateArguments( // the function parameter pack. For a function parameter pack that does // not occur at the end of the parameter-declaration-list, the type of // the parameter pack is a non-deduced context. 
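  //   e.g. (sketch): in 'template <class... Ts> void h(Ts... ts, int);' the
  //   pack is not trailing, so a call 'h(1, 2)' cannot deduce Ts from it.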
- if (ParamIdx + 1 < NumParams) + if (ParamIdx + 1 < NumParamTypes) break; QualType ParamPattern = ParamExpansion->getPattern(); @@ -3492,7 +3448,8 @@ Sema::TemplateDeductionResult Sema::DeduceTemplateArguments( return FinishTemplateArgumentDeduction(FunctionTemplate, Deduced, NumExplicitlySpecified, Specialization, - Info, &OriginalCallArgs); + Info, &OriginalCallArgs, + PartialOverloading); } QualType Sema::adjustCCAndNoReturn(QualType ArgFunctionType, @@ -3699,8 +3656,10 @@ SpecializeCorrespondingLambdaCallOperatorAndInvoker( FunctionTemplateDecl *InvokerTemplate = LambdaClass-> getLambdaStaticInvoker()->getDescribedFunctionTemplate(); - Sema::TemplateDeductionResult LLVM_ATTRIBUTE_UNUSED Result - = S.FinishTemplateArgumentDeduction(InvokerTemplate, DeducedArguments, 0, +#ifndef NDEBUG + Sema::TemplateDeductionResult LLVM_ATTRIBUTE_UNUSED Result = +#endif + S.FinishTemplateArgumentDeduction(InvokerTemplate, DeducedArguments, 0, InvokerSpecialized, TDInfo); assert(Result == Sema::TDK_Success && "If the call operator succeeded so should the invoker!"); @@ -4153,8 +4112,7 @@ static bool isAtLeastAsSpecializedAs(Sema &S, FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, TemplatePartialOrderingContext TPOC, - unsigned NumCallArguments1, - SmallVectorImpl<RefParamPartialOrderingComparison> *RefParamComparisons) { + unsigned NumCallArguments1) { FunctionDecl *FD1 = FT1->getTemplatedDecl(); FunctionDecl *FD2 = FT2->getTemplatedDecl(); const FunctionProtoType *Proto1 = FD1->getType()->getAs<FunctionProtoType>(); @@ -4219,8 +4177,7 @@ static bool isAtLeastAsSpecializedAs(Sema &S, Args2.resize(NumComparedArguments); if (DeduceTemplateArguments(S, TemplateParams, Args2.data(), Args2.size(), Args1.data(), Args1.size(), Info, Deduced, - TDF_None, /*PartialOrdering=*/true, - RefParamComparisons)) + TDF_None, /*PartialOrdering=*/true)) return false; break; @@ -4232,7 +4189,7 @@ static bool isAtLeastAsSpecializedAs(Sema &S, if (DeduceTemplateArgumentsByTypeMatch( S, TemplateParams, Proto2->getReturnType(), Proto1->getReturnType(), Info, Deduced, TDF_None, - /*PartialOrdering=*/true, RefParamComparisons)) + /*PartialOrdering=*/true)) return false; break; @@ -4242,8 +4199,7 @@ static bool isAtLeastAsSpecializedAs(Sema &S, if (DeduceTemplateArgumentsByTypeMatch(S, TemplateParams, FD2->getType(), FD1->getType(), Info, Deduced, TDF_None, - /*PartialOrdering=*/true, - RefParamComparisons)) + /*PartialOrdering=*/true)) return false; break; } @@ -4342,83 +4298,17 @@ Sema::getMoreSpecializedTemplate(FunctionTemplateDecl *FT1, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1, unsigned NumCallArguments2) { - SmallVector<RefParamPartialOrderingComparison, 4> RefParamComparisons; bool Better1 = isAtLeastAsSpecializedAs(*this, Loc, FT1, FT2, TPOC, - NumCallArguments1, nullptr); + NumCallArguments1); bool Better2 = isAtLeastAsSpecializedAs(*this, Loc, FT2, FT1, TPOC, - NumCallArguments2, - &RefParamComparisons); + NumCallArguments2); if (Better1 != Better2) // We have a clear winner - return Better1? FT1 : FT2; + return Better1 ? FT1 : FT2; if (!Better1 && !Better2) // Neither is better than the other return nullptr; - // C++0x [temp.deduct.partial]p10: - // If for each type being considered a given template is at least as - // specialized for all types and more specialized for some set of types and - // the other template is not more specialized for any types or is not at - // least as specialized for any types, then the given template is more - // specialized than the other template. 
Otherwise, neither template is more - // specialized than the other. - Better1 = false; - Better2 = false; - for (unsigned I = 0, N = RefParamComparisons.size(); I != N; ++I) { - // C++0x [temp.deduct.partial]p9: - // If, for a given type, deduction succeeds in both directions (i.e., the - // types are identical after the transformations above) and both P and A - // were reference types (before being replaced with the type referred to - // above): - - // -- if the type from the argument template was an lvalue reference - // and the type from the parameter template was not, the argument - // type is considered to be more specialized than the other; - // otherwise, - if (!RefParamComparisons[I].ArgIsRvalueRef && - RefParamComparisons[I].ParamIsRvalueRef) { - Better2 = true; - if (Better1) - return nullptr; - continue; - } else if (!RefParamComparisons[I].ParamIsRvalueRef && - RefParamComparisons[I].ArgIsRvalueRef) { - Better1 = true; - if (Better2) - return nullptr; - continue; - } - - // -- if the type from the argument template is more cv-qualified than - // the type from the parameter template (as described above), the - // argument type is considered to be more specialized than the - // other; otherwise, - switch (RefParamComparisons[I].Qualifiers) { - case NeitherMoreQualified: - break; - - case ParamMoreQualified: - Better1 = true; - if (Better2) - return nullptr; - continue; - - case ArgMoreQualified: - Better2 = true; - if (Better1) - return nullptr; - continue; - } - - // -- neither type is more specialized than the other. - } - - assert(!(Better1 && Better2) && "Should have broken out in the loop above"); - if (Better1) - return FT1; - else if (Better2) - return FT2; - // FIXME: This mimics what GCC implements, but doesn't match up with the // proposed resolution for core issue 692. This area needs to be sorted out, // but for now we attempt to maintain compatibility. 
@@ -4591,8 +4481,7 @@ Sema::getMoreSpecializedPartialSpecialization( bool Better1 = !DeduceTemplateArgumentsByTypeMatch(*this, PS2->getTemplateParameters(), PT2, PT1, Info, Deduced, TDF_None, - /*PartialOrdering=*/true, - /*RefParamComparisons=*/nullptr); + /*PartialOrdering=*/true); if (Better1) { SmallVector<TemplateArgument, 4> DeducedArgs(Deduced.begin(),Deduced.end()); InstantiatingTemplate Inst(*this, Loc, PS2, DeducedArgs, Info); @@ -4605,8 +4494,7 @@ Sema::getMoreSpecializedPartialSpecialization( Deduced.resize(PS1->getTemplateParameters()->size()); bool Better2 = !DeduceTemplateArgumentsByTypeMatch( *this, PS1->getTemplateParameters(), PT1, PT2, Info, Deduced, TDF_None, - /*PartialOrdering=*/true, - /*RefParamComparisons=*/nullptr); + /*PartialOrdering=*/true); if (Better2) { SmallVector<TemplateArgument, 4> DeducedArgs(Deduced.begin(), Deduced.end()); @@ -4649,8 +4537,7 @@ Sema::getMoreSpecializedPartialSpecialization( Deduced.resize(PS2->getTemplateParameters()->size()); bool Better1 = !DeduceTemplateArgumentsByTypeMatch( *this, PS2->getTemplateParameters(), PT2, PT1, Info, Deduced, TDF_None, - /*PartialOrdering=*/true, - /*RefParamComparisons=*/nullptr); + /*PartialOrdering=*/true); if (Better1) { SmallVector<TemplateArgument, 4> DeducedArgs(Deduced.begin(), Deduced.end()); @@ -4666,8 +4553,7 @@ bool Better2 = !DeduceTemplateArgumentsByTypeMatch(*this, PS1->getTemplateParameters(), PT1, PT2, Info, Deduced, TDF_None, - /*PartialOrdering=*/true, - /*RefParamComparisons=*/nullptr); + /*PartialOrdering=*/true); if (Better2) { SmallVector<TemplateArgument, 4> DeducedArgs(Deduced.begin(),Deduced.end()); InstantiatingTemplate Inst(*this, Loc, PS1, DeducedArgs, Info); diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiate.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiate.cpp index 6ac7175..82ff7c0 100644 --- a/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiate.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiate.cpp @@ -200,13 +200,19 @@ bool Sema::ActiveTemplateInstantiation::isInstantiationRecord() const { llvm_unreachable("Invalid InstantiationKind!"); } -void Sema::InstantiatingTemplate::Initialize( - ActiveTemplateInstantiation::InstantiationKind Kind, +Sema::InstantiatingTemplate::InstantiatingTemplate( + Sema &SemaRef, ActiveTemplateInstantiation::InstantiationKind Kind, SourceLocation PointOfInstantiation, SourceRange InstantiationRange, Decl *Entity, NamedDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, - sema::TemplateDeductionInfo *DeductionInfo) { - SavedInNonInstantiationSFINAEContext = - SemaRef.InNonInstantiationSFINAEContext; + sema::TemplateDeductionInfo *DeductionInfo) + : SemaRef(SemaRef), SavedInNonInstantiationSFINAEContext( + SemaRef.InNonInstantiationSFINAEContext) { + // Don't allow further instantiation if a fatal error has occurred. Any + // diagnostics we might have raised will not be visible.
+ if (SemaRef.Diags.hasFatalErrorOccurred()) { + Invalid = true; + return; + } Invalid = CheckInstantiationDepth(PointOfInstantiation, InstantiationRange); if (!Invalid) { ActiveTemplateInstantiation Inst; @@ -225,124 +231,98 @@ void Sema::InstantiatingTemplate::Initialize( } } -Sema::InstantiatingTemplate:: -InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, - Decl *Entity, - SourceRange InstantiationRange) - : SemaRef(SemaRef) -{ - Initialize(ActiveTemplateInstantiation::TemplateInstantiation, - PointOfInstantiation, InstantiationRange, Entity); -} - -Sema::InstantiatingTemplate:: -InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, - FunctionDecl *Entity, ExceptionSpecification, - SourceRange InstantiationRange) - : SemaRef(SemaRef) -{ - Initialize(ActiveTemplateInstantiation::ExceptionSpecInstantiation, - PointOfInstantiation, InstantiationRange, Entity); -} - -Sema::InstantiatingTemplate:: -InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, - TemplateDecl *Template, - ArrayRef<TemplateArgument> TemplateArgs, - SourceRange InstantiationRange) - : SemaRef(SemaRef) -{ - Initialize(ActiveTemplateInstantiation::DefaultTemplateArgumentInstantiation, - PointOfInstantiation, InstantiationRange, - Template, nullptr, TemplateArgs); -} - -Sema::InstantiatingTemplate:: -InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, - FunctionTemplateDecl *FunctionTemplate, - ArrayRef<TemplateArgument> TemplateArgs, - ActiveTemplateInstantiation::InstantiationKind Kind, - sema::TemplateDeductionInfo &DeductionInfo, - SourceRange InstantiationRange) - : SemaRef(SemaRef) -{ - Initialize(Kind, PointOfInstantiation, InstantiationRange, - FunctionTemplate, nullptr, TemplateArgs, &DeductionInfo); -} - -Sema::InstantiatingTemplate:: -InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, - ClassTemplatePartialSpecializationDecl *PartialSpec, - ArrayRef<TemplateArgument> TemplateArgs, - sema::TemplateDeductionInfo &DeductionInfo, - SourceRange InstantiationRange) - : SemaRef(SemaRef) -{ - Initialize(ActiveTemplateInstantiation::DeducedTemplateArgumentSubstitution, - PointOfInstantiation, InstantiationRange, - PartialSpec, nullptr, TemplateArgs, &DeductionInfo); -} +Sema::InstantiatingTemplate::InstantiatingTemplate( + Sema &SemaRef, SourceLocation PointOfInstantiation, Decl *Entity, + SourceRange InstantiationRange) + : InstantiatingTemplate(SemaRef, + ActiveTemplateInstantiation::TemplateInstantiation, + PointOfInstantiation, InstantiationRange, Entity) {} + +Sema::InstantiatingTemplate::InstantiatingTemplate( + Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionDecl *Entity, + ExceptionSpecification, SourceRange InstantiationRange) + : InstantiatingTemplate( + SemaRef, ActiveTemplateInstantiation::ExceptionSpecInstantiation, + PointOfInstantiation, InstantiationRange, Entity) {} + +Sema::InstantiatingTemplate::InstantiatingTemplate( + Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, + ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange) + : InstantiatingTemplate( + SemaRef, + ActiveTemplateInstantiation::DefaultTemplateArgumentInstantiation, + PointOfInstantiation, InstantiationRange, Template, nullptr, + TemplateArgs) {} + +Sema::InstantiatingTemplate::InstantiatingTemplate( + Sema &SemaRef, SourceLocation PointOfInstantiation, + FunctionTemplateDecl *FunctionTemplate, + ArrayRef<TemplateArgument> TemplateArgs, + 
ActiveTemplateInstantiation::InstantiationKind Kind, + sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange) + : InstantiatingTemplate(SemaRef, Kind, PointOfInstantiation, + InstantiationRange, FunctionTemplate, nullptr, + TemplateArgs, &DeductionInfo) {} + +Sema::InstantiatingTemplate::InstantiatingTemplate( + Sema &SemaRef, SourceLocation PointOfInstantiation, + ClassTemplatePartialSpecializationDecl *PartialSpec, + ArrayRef<TemplateArgument> TemplateArgs, + sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange) + : InstantiatingTemplate( + SemaRef, + ActiveTemplateInstantiation::DeducedTemplateArgumentSubstitution, + PointOfInstantiation, InstantiationRange, PartialSpec, nullptr, + TemplateArgs, &DeductionInfo) {} Sema::InstantiatingTemplate::InstantiatingTemplate( Sema &SemaRef, SourceLocation PointOfInstantiation, VarTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange) - : SemaRef(SemaRef) -{ - Initialize(ActiveTemplateInstantiation::DeducedTemplateArgumentSubstitution, - PointOfInstantiation, InstantiationRange, - PartialSpec, nullptr, TemplateArgs, &DeductionInfo); -} - -Sema::InstantiatingTemplate:: -InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, - ParmVarDecl *Param, - ArrayRef<TemplateArgument> TemplateArgs, - SourceRange InstantiationRange) - : SemaRef(SemaRef) -{ - Initialize(ActiveTemplateInstantiation::DefaultFunctionArgumentInstantiation, - PointOfInstantiation, InstantiationRange, - Param, nullptr, TemplateArgs); -} - - -Sema::InstantiatingTemplate:: -InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, - NamedDecl *Template, NonTypeTemplateParmDecl *Param, - ArrayRef<TemplateArgument> TemplateArgs, - SourceRange InstantiationRange) - : SemaRef(SemaRef) -{ - Initialize(ActiveTemplateInstantiation::PriorTemplateArgumentSubstitution, - PointOfInstantiation, InstantiationRange, - Param, Template, TemplateArgs); -} - -Sema::InstantiatingTemplate:: -InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, - NamedDecl *Template, TemplateTemplateParmDecl *Param, - ArrayRef<TemplateArgument> TemplateArgs, - SourceRange InstantiationRange) - : SemaRef(SemaRef) -{ - Initialize(ActiveTemplateInstantiation::PriorTemplateArgumentSubstitution, - PointOfInstantiation, InstantiationRange, - Param, Template, TemplateArgs); -} - -Sema::InstantiatingTemplate:: -InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, - TemplateDecl *Template, NamedDecl *Param, - ArrayRef<TemplateArgument> TemplateArgs, - SourceRange InstantiationRange) - : SemaRef(SemaRef) -{ - Initialize(ActiveTemplateInstantiation::DefaultTemplateArgumentChecking, - PointOfInstantiation, InstantiationRange, - Param, Template, TemplateArgs); -} + : InstantiatingTemplate( + SemaRef, + ActiveTemplateInstantiation::DeducedTemplateArgumentSubstitution, + PointOfInstantiation, InstantiationRange, PartialSpec, nullptr, + TemplateArgs, &DeductionInfo) {} + +Sema::InstantiatingTemplate::InstantiatingTemplate( + Sema &SemaRef, SourceLocation PointOfInstantiation, ParmVarDecl *Param, + ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange) + : InstantiatingTemplate( + SemaRef, + ActiveTemplateInstantiation::DefaultFunctionArgumentInstantiation, + PointOfInstantiation, InstantiationRange, Param, nullptr, + TemplateArgs) {} + +Sema::InstantiatingTemplate::InstantiatingTemplate( + Sema 
&SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, + NonTypeTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, + SourceRange InstantiationRange) + : InstantiatingTemplate( + SemaRef, + ActiveTemplateInstantiation::PriorTemplateArgumentSubstitution, + PointOfInstantiation, InstantiationRange, Param, Template, + TemplateArgs) {} + +Sema::InstantiatingTemplate::InstantiatingTemplate( + Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, + TemplateTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, + SourceRange InstantiationRange) + : InstantiatingTemplate( + SemaRef, + ActiveTemplateInstantiation::PriorTemplateArgumentSubstitution, + PointOfInstantiation, InstantiationRange, Param, Template, + TemplateArgs) {} + +Sema::InstantiatingTemplate::InstantiatingTemplate( + Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, + NamedDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, + SourceRange InstantiationRange) + : InstantiatingTemplate( + SemaRef, ActiveTemplateInstantiation::DefaultTemplateArgumentChecking, + PointOfInstantiation, InstantiationRange, Param, Template, + TemplateArgs) {} void Sema::InstantiatingTemplate::Clear() { if (!Invalid) { @@ -734,6 +714,20 @@ namespace { } void transformedLocalDecl(Decl *Old, Decl *New) { + // If we've instantiated the call operator of a lambda or the call + // operator template of a generic lambda, update the "instantiation of" + // information. + auto *NewMD = dyn_cast<CXXMethodDecl>(New); + if (NewMD && isLambdaCallOperator(NewMD)) { + auto *OldMD = dyn_cast<CXXMethodDecl>(Old); + if (auto *NewTD = NewMD->getDescribedFunctionTemplate()) + NewTD->setInstantiatedFromMemberTemplate( + OldMD->getDescribedFunctionTemplate()); + else + NewMD->setInstantiationOfMemberFunction(OldMD, + TSK_ImplicitInstantiation); + } + SemaRef.CurrentInstantiationScope->InstantiatedLocal(Old, New); } @@ -836,28 +830,6 @@ namespace { return TreeTransform<TemplateInstantiator>::TransformLambdaExpr(E); } - ExprResult TransformLambdaScope(LambdaExpr *E, - CXXMethodDecl *NewCallOperator, - ArrayRef<InitCaptureInfoTy> InitCaptureExprsAndTypes) { - CXXMethodDecl *const OldCallOperator = E->getCallOperator(); - // In the generic lambda case, we set the NewTemplate to be considered - // an "instantiation" of the OldTemplate. - if (FunctionTemplateDecl *const NewCallOperatorTemplate = - NewCallOperator->getDescribedFunctionTemplate()) { - - FunctionTemplateDecl *const OldCallOperatorTemplate = - OldCallOperator->getDescribedFunctionTemplate(); - NewCallOperatorTemplate->setInstantiatedFromMemberTemplate( - OldCallOperatorTemplate); - } else - // For a non-generic lambda we set the NewCallOperator to - // be an instantiation of the OldCallOperator. 
- NewCallOperator->setInstantiationOfMemberFunction(OldCallOperator, - TSK_ImplicitInstantiation); - - return inherited::TransformLambdaScope(E, NewCallOperator, - InitCaptureExprsAndTypes); - } TemplateParameterList *TransformTemplateParameterList( TemplateParameterList *OrigTPL) { if (!OrigTPL || !OrigTPL->size()) return OrigTPL; @@ -1767,7 +1739,7 @@ Sema::SubstBaseSpecifiers(CXXRecordDecl *Instantiation, const MultiLevelTemplateArgumentList &TemplateArgs) { bool Invalid = false; SmallVector<CXXBaseSpecifier*, 4> InstantiatedBases; - for (const auto Base : Pattern->bases()) { + for (const auto &Base : Pattern->bases()) { if (!Base.getType()->isDependentType()) { if (const CXXRecordDecl *RD = Base.getType()->getAsCXXRecordDecl()) { if (RD->isInvalidDecl()) @@ -2063,6 +2035,10 @@ Sema::InstantiateClass(SourceLocation PointOfInstantiation, SourceLocation(), SourceLocation(), nullptr); CheckCompletedCXXClass(Instantiation); + // Default arguments are parsed, if not instantiated. We can go instantiate + // default arg exprs for default constructors if necessary now. + ActOnFinishCXXMemberDefaultArgs(Instantiation); + // Instantiate late parsed attributes, and attach them to their decls. // See Sema::InstantiateAttrs for (LateInstantiatedAttrVec::iterator I = LateAttrs.begin(), @@ -2257,7 +2233,7 @@ bool Sema::InstantiateInClassInitializer( EnterExpressionEvaluationContext EvalContext(*this, Sema::PotentiallyEvaluated); - LocalInstantiationScope Scope(*this); + LocalInstantiationScope Scope(*this, true); // Instantiate the initializer. ActOnStartCXXInClassMemberInitializer(); @@ -2812,6 +2788,16 @@ LocalInstantiationScope::findInstantiationOf(const Decl *D) { isa<TemplateTemplateParmDecl>(D)) return nullptr; + // Local types referenced prior to definition may require instantiation. + if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) + if (RD->isLocalClass()) + return nullptr; + + // Enumeration types referenced prior to definition may appear as a result of + // error recovery. + if (isa<EnumDecl>(D)) + return nullptr; + // If we didn't find the decl, then we either have a sema bug, or we have a // forward reference to a label declaration. Return null to indicate that // we have an uninstantiated label. diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp index 40e8617..5c994f8 100644 --- a/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp @@ -202,6 +202,31 @@ static void instantiateDependentEnableIfAttr( New->addAttr(EIA); } +// Constructs and adds to New a new instance of CUDALaunchBoundsAttr using +// template A as the base and arguments from TemplateArgs. +static void instantiateDependentCUDALaunchBoundsAttr( + Sema &S, const MultiLevelTemplateArgumentList &TemplateArgs, + const CUDALaunchBoundsAttr &Attr, Decl *New) { + // The alignment expression is a constant expression. 
+ EnterExpressionEvaluationContext Unevaluated(S, Sema::ConstantEvaluated); + + ExprResult Result = S.SubstExpr(Attr.getMaxThreads(), TemplateArgs); + if (Result.isInvalid()) + return; + Expr *MaxThreads = Result.getAs<Expr>(); + + Expr *MinBlocks = nullptr; + if (Attr.getMinBlocks()) { + Result = S.SubstExpr(Attr.getMinBlocks(), TemplateArgs); + if (Result.isInvalid()) + return; + MinBlocks = Result.getAs<Expr>(); + } + + S.AddLaunchBoundsAttr(Attr.getLocation(), New, MaxThreads, MinBlocks, + Attr.getSpellingListIndex()); +} + void Sema::InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Tmpl, Decl *New, LateInstantiatedAttrVec *LateAttrs, @@ -233,6 +258,13 @@ void Sema::InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs, continue; } + if (const CUDALaunchBoundsAttr *CUDALaunchBounds = + dyn_cast<CUDALaunchBoundsAttr>(TmplAttr)) { + instantiateDependentCUDALaunchBoundsAttr(*this, TemplateArgs, + *CUDALaunchBounds, New); + continue; + } + // Existing DLL attribute on the instantiation takes precedence. if (TmplAttr->getKind() == attr::DLLExport || TmplAttr->getKind() == attr::DLLImport) { @@ -289,6 +321,11 @@ TemplateDeclInstantiator::VisitTranslationUnitDecl(TranslationUnitDecl *D) { } Decl * +TemplateDeclInstantiator::VisitExternCContextDecl(ExternCContextDecl *D) { + llvm_unreachable("extern \"C\" context cannot be instantiated"); +} + +Decl * TemplateDeclInstantiator::VisitLabelDecl(LabelDecl *D) { LabelDecl *Inst = LabelDecl::Create(SemaRef.Context, Owner, D->getLocation(), D->getIdentifier()); @@ -1265,11 +1302,19 @@ Decl *TemplateDeclInstantiator::VisitCXXRecordDecl(CXXRecordDecl *D) { // DR1484 clarifies that the members of a local class are instantiated as part // of the instantiation of their enclosing entity. if (D->isCompleteDefinition() && D->isLocalClass()) { + Sema::SavePendingLocalImplicitInstantiationsRAII + SavedPendingLocalImplicitInstantiations(SemaRef); + SemaRef.InstantiateClass(D->getLocation(), Record, D, TemplateArgs, TSK_ImplicitInstantiation, /*Complain=*/true); + SemaRef.InstantiateClassMembers(D->getLocation(), Record, TemplateArgs, TSK_ImplicitInstantiation); + + // This class may have local implicit instantiations that need to be + // performed within this scope. + SemaRef.PerformPendingInstantiations(/*LocalOnly=*/true); } SemaRef.DiagnoseUnusedNestedTypedefs(Record); @@ -3313,12 +3358,8 @@ void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation, // it marks vtables used in late parsed templates as used. SavePendingLocalImplicitInstantiationsRAII SavedPendingLocalImplicitInstantiations(*this); - std::unique_ptr<SavePendingInstantiationsAndVTableUsesRAII> - SavePendingInstantiationsAndVTableUses; - if (Recursive) { - SavePendingInstantiationsAndVTableUses.reset( - new SavePendingInstantiationsAndVTableUsesRAII(*this)); - } + SavePendingInstantiationsAndVTableUsesRAII + SavePendingInstantiationsAndVTableUses(*this, /*Enabled=*/Recursive); // Call the LateTemplateParser callback if there is a need to late parse // a templated function definition. @@ -3463,8 +3504,8 @@ void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation, // instantiation of this template. PerformPendingInstantiations(); - // Restore PendingInstantiations and VTableUses. - SavePendingInstantiationsAndVTableUses.reset(); + // PendingInstantiations and VTableUses are restored through + // SavePendingInstantiationsAndVTableUses's destructor. 
} } @@ -3780,12 +3821,8 @@ void Sema::InstantiateVariableDefinition(SourceLocation PointOfInstantiation, // If we're performing recursive template instantiation, create our own // queue of pending implicit instantiations that we will instantiate // later, while we're still within our own instantiation context. - std::unique_ptr<SavePendingInstantiationsAndVTableUsesRAII> - SavePendingInstantiationsAndVTableUses; - if (Recursive) { - SavePendingInstantiationsAndVTableUses.reset( - new SavePendingInstantiationsAndVTableUsesRAII(*this)); - } + SavePendingInstantiationsAndVTableUsesRAII + SavePendingInstantiationsAndVTableUses(*this, /*Enabled=*/Recursive); LocalInstantiationScope Local(*this); @@ -3812,8 +3849,8 @@ void Sema::InstantiateVariableDefinition(SourceLocation PointOfInstantiation, // instantiation of this template. PerformPendingInstantiations(); - // Restore PendingInstantiations and VTableUses. - SavePendingInstantiationsAndVTableUses.reset(); + // PendingInstantiations and VTableUses are restored through + // SavePendingInstantiationsAndVTableUses's destructor. } } @@ -3899,12 +3936,8 @@ void Sema::InstantiateVariableDefinition(SourceLocation PointOfInstantiation, // while we're still within our own instantiation context. SavePendingLocalImplicitInstantiationsRAII SavedPendingLocalImplicitInstantiations(*this); - std::unique_ptr<SavePendingInstantiationsAndVTableUsesRAII> - SavePendingInstantiationsAndVTableUses; - if (Recursive) { - SavePendingInstantiationsAndVTableUses.reset( - new SavePendingInstantiationsAndVTableUsesRAII(*this)); - } + SavePendingInstantiationsAndVTableUsesRAII + SavePendingInstantiationsAndVTableUses(*this, /*Enabled=*/Recursive); // Enter the scope of this instantiation. We don't use // PushDeclContext because we don't have a scope. @@ -3970,8 +4003,8 @@ void Sema::InstantiateVariableDefinition(SourceLocation PointOfInstantiation, // instantiation of this template. PerformPendingInstantiations(); - // Restore PendingInstantiations and VTableUses. - SavePendingInstantiationsAndVTableUses.reset(); + // PendingInstantiations and VTableUses are restored through + // SavePendingInstantiationsAndVTableUses's destructor. } } @@ -4409,6 +4442,30 @@ NamedDecl *Sema::FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D, if (D->isInvalidDecl()) return nullptr; + // Normally this function only searches for already-instantiated declarations; + // however, we have to make an exception for local types used before their + // definition, as in this code: + // + // template<typename T> void f1() { + // void g1(struct x1); + // struct x1 {}; + // } + // + // In this case, instantiation of the type of 'g1' requires the definition of + // 'x1', which appears later. Error recovery may also produce an enum used + // before its definition. In these cases we need to instantiate the relevant + // declarations here. + bool NeedInstantiate = false; + if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) + NeedInstantiate = RD->isLocalClass(); + else + NeedInstantiate = isa<EnumDecl>(D); + if (NeedInstantiate) { + Decl *Inst = SubstDecl(D, CurContext, TemplateArgs); + CurrentInstantiationScope->InstantiatedLocal(D, Inst); + return cast<TypeDecl>(Inst); + } + // If we didn't find the decl, then we must have a label decl that hasn't // been found yet. Lazily instantiate it and return it now.
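
Note: the hunks above (and the matching one in InstantiateFunctionDefinition earlier) replace a std::unique_ptr<SavePendingInstantiationsAndVTableUsesRAII> that was conditionally reset() with a plain stack object taking an /*Enabled=*/Recursive flag. A minimal sketch of that conditionally-enabled RAII guard pattern, using hypothetical names rather than clang's actual types:

#include <utility>
#include <vector>

struct SemaState {
  std::vector<int> PendingInstantiations; // stand-in for the real queues
};

class SavePendingRAII {
  SemaState &S;
  std::vector<int> Saved;
  bool Enabled;

public:
  SavePendingRAII(SemaState &S, bool Enabled) : S(S), Enabled(Enabled) {
    if (Enabled)
      Saved.swap(S.PendingInstantiations); // start with a fresh local queue
  }
  ~SavePendingRAII() {
    if (Enabled)
      S.PendingInstantiations = std::move(Saved); // restore on scope exit
  }
};

void instantiateDefinition(SemaState &S, bool Recursive) {
  // No heap allocation and no conditional reset(); the flag alone decides
  // whether the guard saves and restores anything.
  SavePendingRAII Guard(S, /*Enabled=*/Recursive);
  // ... perform instantiations; work queued here stays local ...
}
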
assert(isa<LabelDecl>(D)); diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaTemplateVariadic.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateVariadic.cpp index e4fab71..fd3ba35 100644 --- a/contrib/llvm/tools/clang/lib/Sema/SemaTemplateVariadic.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateVariadic.cpp @@ -251,15 +251,10 @@ Sema::DiagnoseUnexpandedParameterPacks(SourceLocation Loc, Locations.push_back(Unexpanded[I].second); } - DiagnosticBuilder DB - = Names.size() == 0? Diag(Loc, diag::err_unexpanded_parameter_pack_0) - << (int)UPPC - : Names.size() == 1? Diag(Loc, diag::err_unexpanded_parameter_pack_1) - << (int)UPPC << Names[0] - : Names.size() == 2? Diag(Loc, diag::err_unexpanded_parameter_pack_2) - << (int)UPPC << Names[0] << Names[1] - : Diag(Loc, diag::err_unexpanded_parameter_pack_3_or_more) - << (int)UPPC << Names[0] << Names[1]; + DiagnosticBuilder DB = Diag(Loc, diag::err_unexpanded_parameter_pack) + << (int)UPPC << (int)Names.size(); + for (size_t I = 0, E = std::min(Names.size(), (size_t)2); I != E; ++I) + DB << Names[I]; for (unsigned I = 0, N = Locations.size(); I != N; ++I) DB << SourceRange(Locations[I]); diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaType.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaType.cpp index 0f96a1c..57a4689 100644 --- a/contrib/llvm/tools/clang/lib/Sema/SemaType.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/SemaType.cpp @@ -112,7 +112,6 @@ static void diagnoseBadTypeAttribute(Sema &S, const AttributeList &attr, case AttributeList::AT_SysVABI: \ case AttributeList::AT_Regparm: \ case AttributeList::AT_Pcs: \ - case AttributeList::AT_PnaclCall: \ case AttributeList::AT_IntelOclBicc // Microsoft-specific type qualifiers. @@ -689,6 +688,31 @@ static void maybeSynthesizeBlockSignature(TypeProcessingState &state, state.setCurrentChunkIndex(declarator.getNumTypeObjects()); } +static void diagnoseAndRemoveTypeQualifiers(Sema &S, const DeclSpec &DS, + unsigned &TypeQuals, + QualType TypeSoFar, + unsigned RemoveTQs, + unsigned DiagID) { + // If this occurs outside a template instantiation, warn the user about + // it; they probably didn't mean to specify a redundant qualifier. + typedef std::pair<DeclSpec::TQ, SourceLocation> QualLoc; + for (QualLoc Qual : {QualLoc(DeclSpec::TQ_const, DS.getConstSpecLoc()), + QualLoc(DeclSpec::TQ_volatile, DS.getVolatileSpecLoc()), + QualLoc(DeclSpec::TQ_atomic, DS.getAtomicSpecLoc())}) { + if (!(RemoveTQs & Qual.first)) + continue; + + if (S.ActiveTemplateInstantiations.empty()) { + if (TypeQuals & Qual.first) + S.Diag(Qual.second, DiagID) + << DeclSpec::getSpecifierName(Qual.first) << TypeSoFar + << FixItHint::CreateRemoval(Qual.second); + } + + TypeQuals &= ~Qual.first; + } +} + /// \brief Convert the specified declspec to the appropriate type /// object. 
/// \param state Specifies the declarator containing the declaration specifier @@ -869,8 +893,11 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) { else Result = Context.DoubleTy; - if (S.getLangOpts().OpenCL && !S.getOpenCLOptions().cl_khr_fp64) { - S.Diag(DS.getTypeSpecTypeLoc(), diag::err_double_requires_fp64); + if (S.getLangOpts().OpenCL && + !((S.getLangOpts().OpenCLVersion >= 120) || + S.getOpenCLOptions().cl_khr_fp64)) { + S.Diag(DS.getTypeSpecTypeLoc(), diag::err_type_requires_extension) + << Result << "cl_khr_fp64"; declarator.setInvalidType(true); } break; @@ -946,6 +973,30 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) { << DS.getSourceRange(); declarator.setInvalidType(true); } + } else if (S.getLangOpts().OpenCL) { + if (const AtomicType *AT = Result->getAs<AtomicType>()) { + const BuiltinType *BT = AT->getValueType()->getAs<BuiltinType>(); + bool NoExtTypes = BT && (BT->getKind() == BuiltinType::Int || + BT->getKind() == BuiltinType::UInt || + BT->getKind() == BuiltinType::Float); + if (!S.getOpenCLOptions().cl_khr_int64_base_atomics && !NoExtTypes) { + S.Diag(DS.getTypeSpecTypeLoc(), diag::err_type_requires_extension) + << Result << "cl_khr_int64_base_atomics"; + declarator.setInvalidType(true); + } + if (!S.getOpenCLOptions().cl_khr_int64_extended_atomics && + !NoExtTypes) { + S.Diag(DS.getTypeSpecTypeLoc(), diag::err_type_requires_extension) + << Result << "cl_khr_int64_extended_atomics"; + declarator.setInvalidType(true); + } + if (!S.getOpenCLOptions().cl_khr_fp64 && BT && + BT->getKind() == BuiltinType::Double) { + S.Diag(DS.getTypeSpecTypeLoc(), diag::err_type_requires_extension) + << Result << "cl_khr_fp64"; + declarator.setInvalidType(true); + } + } } // TypeQuals handled by caller. @@ -1091,24 +1142,22 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) { // Apply const/volatile/restrict qualifiers to T. if (unsigned TypeQuals = DS.getTypeQualifiers()) { - - // Warn about CV qualifiers on functions: C99 6.7.3p8: "If the specification - // of a function type includes any type qualifiers, the behavior is - // undefined." - if (Result->isFunctionType() && TypeQuals) { - if (TypeQuals & DeclSpec::TQ_const) - S.Diag(DS.getConstSpecLoc(), diag::warn_typecheck_function_qualifiers) - << Result << DS.getSourceRange(); - else if (TypeQuals & DeclSpec::TQ_volatile) - S.Diag(DS.getVolatileSpecLoc(), - diag::warn_typecheck_function_qualifiers) - << Result << DS.getSourceRange(); - else { - assert((TypeQuals & (DeclSpec::TQ_restrict | DeclSpec::TQ_atomic)) && - "Has CVRA quals but not C, V, R, or A?"); - // No diagnostic; we'll diagnose 'restrict' or '_Atomic' applied to a - // function type later, in BuildQualifiedType. - } + // Warn about CV qualifiers on function types. + // C99 6.7.3p8: + // If the specification of a function type includes any type qualifiers, + // the behavior is undefined. + // C++11 [dcl.fct]p7: + // The effect of a cv-qualifier-seq in a function declarator is not the + // same as adding cv-qualification on top of the function type. In the + // latter case, the cv-qualifiers are ignored. + if (TypeQuals && Result->isFunctionType()) { + diagnoseAndRemoveTypeQualifiers( + S, DS, TypeQuals, Result, DeclSpec::TQ_const | DeclSpec::TQ_volatile, + S.getLangOpts().CPlusPlus + ? 
diag::warn_typecheck_function_qualifiers_ignored + : diag::warn_typecheck_function_qualifiers_unspecified); + // No diagnostic for 'restrict' or '_Atomic' applied to a + // function type; we'll diagnose those later, in BuildQualifiedType. } // C++11 [dcl.ref]p1: @@ -1119,25 +1168,11 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) { // There don't appear to be any other contexts in which a cv-qualified // reference type could be formed, so the 'ill-formed' clause here appears // to never happen. - if (DS.getTypeSpecType() == DeclSpec::TST_typename && - TypeQuals && Result->isReferenceType()) { - // If this occurs outside a template instantiation, warn the user about - // it; they probably didn't mean to specify a redundant qualifier. - typedef std::pair<DeclSpec::TQ, SourceLocation> QualLoc; - QualLoc Quals[] = { - QualLoc(DeclSpec::TQ_const, DS.getConstSpecLoc()), - QualLoc(DeclSpec::TQ_volatile, DS.getVolatileSpecLoc()), - QualLoc(DeclSpec::TQ_atomic, DS.getAtomicSpecLoc()) - }; - for (unsigned I = 0, N = llvm::array_lengthof(Quals); I != N; ++I) { - if (S.ActiveTemplateInstantiations.empty()) { - if (TypeQuals & Quals[I].first) - S.Diag(Quals[I].second, diag::warn_typecheck_reference_qualifiers) - << DeclSpec::getSpecifierName(Quals[I].first) << Result - << FixItHint::CreateRemoval(Quals[I].second); - } - TypeQuals &= ~Quals[I].first; - } + if (TypeQuals && Result->isReferenceType()) { + diagnoseAndRemoveTypeQualifiers( + S, DS, TypeQuals, Result, + DeclSpec::TQ_const | DeclSpec::TQ_volatile | DeclSpec::TQ_atomic, + diag::warn_typecheck_reference_qualifiers); } // C90 6.5.3 constraints: "The same type qualifier shall not appear more @@ -1809,13 +1844,7 @@ QualType Sema::BuildMemberPointerType(QualType T, QualType Class, // exception specification. if (CheckDistantExceptionSpec(T)) { Diag(Loc, diag::err_distant_exception_spec); - - // FIXME: If we're doing this as part of template instantiation, - // we should return immediately. - - // Build the type anyway, but use the canonical type so that the - // exception specifiers are stripped off. - T = Context.getCanonicalType(T); + return QualType(); } // C++ 8.3.3p3: A pointer to member shall not point to ... a member @@ -2463,6 +2492,10 @@ getCCForDeclaratorChunk(Sema &S, Declarator &D, // in a member pointer. IsCXXInstanceMethod = D.getTypeObject(I).Kind == DeclaratorChunk::MemberPointer; + } else if (D.getContext() == Declarator::LambdaExprContext) { + // This can only be a call operator for a lambda, which is an instance + // method. + IsCXXInstanceMethod = true; } else { // We're the innermost decl chunk, so must be a function declarator. assert(D.isFunctionDeclarator()); @@ -2476,8 +2509,24 @@ getCCForDeclaratorChunk(Sema &S, Declarator &D, } } - return S.Context.getDefaultCallingConvention(FTI.isVariadic, - IsCXXInstanceMethod); + CallingConv CC = S.Context.getDefaultCallingConvention(FTI.isVariadic, + IsCXXInstanceMethod); + + // Attribute AT_OpenCLKernel affects the calling convention only on + // the SPIR target, hence it cannot be treated as a calling + // convention attribute. This is the simplest place to infer + // "spir_kernel" for OpenCL kernels on SPIR. 
+ if (CC == CC_SpirFunction) { + for (const AttributeList *Attr = D.getDeclSpec().getAttributes().getList(); + Attr; Attr = Attr->getNext()) { + if (Attr->getKind() == AttributeList::AT_OpenCLKernel) { + CC = CC_SpirKernel; + break; + } + } + } + + return CC; } static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state, @@ -2792,8 +2841,16 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state, // class type in C++. if ((T.getCVRQualifiers() || T->isAtomicType()) && !(S.getLangOpts().CPlusPlus && - (T->isDependentType() || T->isRecordType()))) - diagnoseRedundantReturnTypeQualifiers(S, T, D, chunkIndex); + (T->isDependentType() || T->isRecordType()))) { + if (T->isVoidType() && !S.getLangOpts().CPlusPlus && + D.getFunctionDefinitionKind() == FDK_Definition) { + // [6.9.1/3] qualified void return is invalid on a C + // function definition. Apparently ok on declarations and + // in C++ though (!) + S.Diag(DeclType.Loc, diag::err_func_returning_qualified_void) << T; + } else + diagnoseRedundantReturnTypeQualifiers(S, T, D, chunkIndex); + } // Objective-C ARC ownership qualifiers are ignored on the function // return type (by type canonicalization). Complain if this attribute @@ -3424,8 +3481,6 @@ static AttributeList::Kind getAttrListKind(AttributedType::Kind kind) { case AttributedType::attr_pcs: case AttributedType::attr_pcs_vfp: return AttributeList::AT_Pcs; - case AttributedType::attr_pnaclcall: - return AttributeList::AT_PnaclCall; case AttributedType::attr_inteloclbicc: return AttributeList::AT_IntelOclBicc; case AttributedType::attr_ms_abi: @@ -4454,8 +4509,6 @@ static AttributedType::Kind getCCTypeAttrKind(AttributeList &Attr) { .Case("aapcs", AttributedType::attr_pcs) .Case("aapcs-vfp", AttributedType::attr_pcs_vfp); } - case AttributeList::AT_PnaclCall: - return AttributedType::attr_pnaclcall; case AttributeList::AT_IntelOclBicc: return AttributedType::attr_inteloclbicc; case AttributeList::AT_MSABI: @@ -5086,14 +5139,18 @@ bool Sema::RequireCompleteType(SourceLocation Loc, QualType T, /// \param D The definition of the entity. /// \param Suggested Filled in with the declaration that should be made visible /// in order to provide a definition of this entity. -static bool hasVisibleDefinition(Sema &S, NamedDecl *D, NamedDecl **Suggested) { +bool Sema::hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested) { // Easy case: if we don't have modules, all declarations are visible. - if (!S.getLangOpts().Modules) + if (!getLangOpts().Modules) return true; // If this definition was instantiated from a template, map back to the // pattern from which it was instantiated. - if (auto *RD = dyn_cast<CXXRecordDecl>(D)) { + if (isa<TagDecl>(D) && cast<TagDecl>(D)->isBeingDefined()) { + // We're in the middle of defining it; this definition should be treated + // as visible. + return true; + } else if (auto *RD = dyn_cast<CXXRecordDecl>(D)) { if (auto *Pattern = RD->getTemplateInstantiationPattern()) RD = Pattern; D = RD->getDefinition(); @@ -5104,7 +5161,7 @@ static bool hasVisibleDefinition(Sema &S, NamedDecl *D, NamedDecl **Suggested) { // If the enum has a fixed underlying type, any declaration of it will do. 
*Suggested = nullptr; for (auto *Redecl : ED->redecls()) { - if (LookupResult::isVisible(S, Redecl)) + if (LookupResult::isVisible(*this, Redecl)) return true; if (Redecl->isThisDeclarationADefinition() || (Redecl->isCanonicalDecl() && !*Suggested)) @@ -5119,7 +5176,7 @@ static bool hasVisibleDefinition(Sema &S, NamedDecl *D, NamedDecl **Suggested) { // FIXME: If we merged any other decl into D, and that declaration is visible, // then we should consider a definition to be visible. *Suggested = D; - return LookupResult::isVisible(S, D); + return LookupResult::isVisible(*this, D); } /// Locks in the inheritance model for the given class and all of its bases. @@ -5170,13 +5227,13 @@ bool Sema::RequireCompleteTypeImpl(SourceLocation Loc, QualType T, // If we know about the definition but it is not visible, complain. NamedDecl *SuggestedDef = nullptr; if (!Diagnoser.Suppressed && Def && - !hasVisibleDefinition(*this, Def, &SuggestedDef)) { + !hasVisibleDefinition(Def, &SuggestedDef)) { // Suppress this error outside of a SFINAE context if we've already // emitted the error once for this type. There's no usefulness in // repeating the diagnostic. // FIXME: Add a Fix-It that imports the corresponding module or includes // the header. - Module *Owner = SuggestedDef->getOwningModule(); + Module *Owner = getOwningModule(SuggestedDef); Diag(Loc, diag::err_module_private_definition) << T << Owner->getFullModuleName(); Diag(SuggestedDef->getLocation(), diag::note_previous_definition); diff --git a/contrib/llvm/tools/clang/lib/Sema/TreeTransform.h b/contrib/llvm/tools/clang/lib/Sema/TreeTransform.h index 36abbb6..f5249fd 100644 --- a/contrib/llvm/tools/clang/lib/Sema/TreeTransform.h +++ b/contrib/llvm/tools/clang/lib/Sema/TreeTransform.h @@ -619,11 +619,6 @@ public: StmtResult TransformCompoundStmt(CompoundStmt *S, bool IsStmtExpr); ExprResult TransformCXXNamedCastExpr(CXXNamedCastExpr *E); - - typedef std::pair<ExprResult, QualType> InitCaptureInfoTy; - /// \brief Transform the captures and body of a lambda expression. - ExprResult TransformLambdaScope(LambdaExpr *E, CXXMethodDecl *CallOperator, - ArrayRef<InitCaptureInfoTy> InitCaptureExprsAndTypes); TemplateParameterList *TransformTemplateParameterList( TemplateParameterList *TPL) { @@ -1702,7 +1697,7 @@ public: } StmtResult RebuildSEHFinallyStmt(SourceLocation Loc, Stmt *Block) { - return getSema().ActOnSEHFinallyBlock(Loc, Block); + return SEHFinallyStmt::Create(getSema().getASTContext(), Loc, Block); } /// \brief Build a new predefined expression. @@ -1870,11 +1865,9 @@ public: return ExprError(); Base = BaseResult.get(); ExprValueKind VK = isArrow ? VK_LValue : Base->getValueKind(); - MemberExpr *ME = - new (getSema().Context) MemberExpr(Base, isArrow, - Member, MemberNameInfo, - cast<FieldDecl>(Member)->getType(), - VK, OK_Ordinary); + MemberExpr *ME = new (getSema().Context) + MemberExpr(Base, isArrow, OpLoc, Member, MemberNameInfo, + cast<FieldDecl>(Member)->getType(), VK, OK_Ordinary); return ME; } @@ -2625,6 +2618,31 @@ public: RBracLoc, Args); } + /// \brief Build a new Objective-C instance/class message to 'super'. + ExprResult RebuildObjCMessageExpr(SourceLocation SuperLoc, + Selector Sel, + ArrayRef<SourceLocation> SelectorLocs, + ObjCMethodDecl *Method, + SourceLocation LBracLoc, + MultiExprArg Args, + SourceLocation RBracLoc) { + ObjCInterfaceDecl *Class = Method->getClassInterface(); + QualType ReceiverTy = SemaRef.Context.getObjCInterfaceType(Class); + + return Method->isInstanceMethod() ? 
SemaRef.BuildInstanceMessage(nullptr, + ReceiverTy, + SuperLoc, + Sel, Method, LBracLoc, SelectorLocs, + RBracLoc, Args) + : SemaRef.BuildClassMessage(nullptr, + ReceiverTy, + SuperLoc, + Sel, Method, LBracLoc, SelectorLocs, + RBracLoc, Args); + + + } + /// \brief Build a new Objective-C ivar reference expression. /// /// By default, performs semantic analysis to build the new expression. @@ -6647,7 +6665,16 @@ StmtResult TreeTransform<Derived>::TransformOMPExecutableDirective( if (!D->getAssociatedStmt()) { return StmtError(); } - AssociatedStmt = getDerived().TransformStmt(D->getAssociatedStmt()); + getDerived().getSema().ActOnOpenMPRegionStart(D->getDirectiveKind(), + /*CurScope=*/nullptr); + StmtResult Body; + { + Sema::CompoundScopeRAII CompoundScope(getSema()); + Body = getDerived().TransformStmt( + cast<CapturedStmt>(D->getAssociatedStmt())->getCapturedStmt()); + } + AssociatedStmt = + getDerived().getSema().ActOnOpenMPRegionEnd(Body, TClauses); if (AssociatedStmt.isInvalid()) { return StmtError(); } @@ -7834,6 +7861,9 @@ TreeTransform<Derived>::TransformExtVectorElementExpr(ExtVectorElementExpr *E) { template<typename Derived> ExprResult TreeTransform<Derived>::TransformInitListExpr(InitListExpr *E) { + if (InitListExpr *Syntactic = E->getSyntacticForm()) + E = Syntactic; + bool InitChanged = false; SmallVector<Expr*, 4> Inits; @@ -7841,8 +7871,12 @@ TreeTransform<Derived>::TransformInitListExpr(InitListExpr *E) { Inits, &InitChanged)) return ExprError(); - if (!getDerived().AlwaysRebuild() && !InitChanged) - return E; + if (!getDerived().AlwaysRebuild() && !InitChanged) { + // FIXME: Attempt to reuse the existing syntactic form of the InitListExpr + // in some cases. We can't reuse it in general, because the syntactic and + // semantic forms are linked, and we can't know that semantic form will + // match even if the syntactic form does. + } return getDerived().RebuildInitList(E->getLBraceLoc(), Inits, E->getRBraceLoc(), E->getType()); @@ -9092,13 +9126,14 @@ ExprResult TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) { // Transform any init-capture expressions before entering the scope of the // lambda body, because they are not semantically within that scope. + typedef std::pair<ExprResult, QualType> InitCaptureInfoTy; SmallVector<InitCaptureInfoTy, 8> InitCaptureExprsAndTypes; InitCaptureExprsAndTypes.resize(E->explicit_capture_end() - - E->explicit_capture_begin()); + E->explicit_capture_begin()); for (LambdaExpr::capture_iterator C = E->capture_begin(), CEnd = E->capture_end(); C != CEnd; ++C) { - if (!C->isInitCapture()) + if (!E->isInitCapture(C)) continue; EnterExpressionEvaluationContext EEEC(getSema(), Sema::PotentiallyEvaluated); @@ -9120,12 +9155,9 @@ TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) { std::make_pair(NewExprInitResult, NewInitCaptureType); } - LambdaScopeInfo *LSI = getSema().PushLambdaScope(); - Sema::FunctionScopeRAII FuncScopeCleanup(getSema()); - // Transform the template parameters, and add them to the current // instantiation scope. The null case is handled correctly. - LSI->GLTemplateParameterList = getDerived().TransformTemplateParameterList( + auto TPL = getDerived().TransformTemplateParameterList( E->getTemplateParameterList()); // Transform the type of the original lambda's call operator. 
@@ -9153,6 +9185,10 @@ TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) { NewCallOpType); } + LambdaScopeInfo *LSI = getSema().PushLambdaScope(); + Sema::FunctionScopeRAII FuncScopeCleanup(getSema()); + LSI->GLTemplateParameterList = TPL; + // Create the local class that will describe the lambda. CXXRecordDecl *Class = getSema().createLambdaClosureType(E->getIntroducerRange(), @@ -9169,34 +9205,22 @@ TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) { LSI->CallOperator = NewCallOperator; getDerived().transformAttrs(E->getCallOperator(), NewCallOperator); - - // TransformLambdaScope will manage the function scope, so we can disable the - // cleanup. - FuncScopeCleanup.disable(); - - return getDerived().TransformLambdaScope(E, NewCallOperator, - InitCaptureExprsAndTypes); -} - -template<typename Derived> -ExprResult -TreeTransform<Derived>::TransformLambdaScope(LambdaExpr *E, - CXXMethodDecl *CallOperator, - ArrayRef<InitCaptureInfoTy> InitCaptureExprsAndTypes) { - bool Invalid = false; + getDerived().transformedLocalDecl(E->getCallOperator(), NewCallOperator); // Introduce the context of the call operator. - Sema::ContextRAII SavedContext(getSema(), CallOperator, + Sema::ContextRAII SavedContext(getSema(), NewCallOperator, /*NewThisContext*/false); - LambdaScopeInfo *const LSI = getSema().getCurLambda(); // Enter the scope of the lambda. - getSema().buildLambdaScope(LSI, CallOperator, E->getIntroducerRange(), - E->getCaptureDefault(), - E->getCaptureDefaultLoc(), - E->hasExplicitParameters(), - E->hasExplicitResultType(), - E->isMutable()); + getSema().buildLambdaScope(LSI, NewCallOperator, + E->getIntroducerRange(), + E->getCaptureDefault(), + E->getCaptureDefaultLoc(), + E->hasExplicitParameters(), + E->hasExplicitResultType(), + E->isMutable()); + + bool Invalid = false; // Transform captures. bool FinishedExplicitCaptures = false; @@ -9221,8 +9245,7 @@ TreeTransform<Derived>::TransformLambdaScope(LambdaExpr *E, continue; // Rebuild init-captures, including the implied field declaration. - if (C->isInitCapture()) { - + if (E->isInitCapture(C)) { InitCaptureInfoTy InitExprTypePair = InitCaptureExprsAndTypes[C - E->capture_begin()]; ExprResult Init = InitExprTypePair.first; @@ -9309,28 +9332,34 @@ TreeTransform<Derived>::TransformLambdaScope(LambdaExpr *E, if (!FinishedExplicitCaptures) getSema().finishLambdaExplicitCaptures(LSI); - // Enter a new evaluation context to insulate the lambda from any // cleanups from the enclosing full-expression. getSema().PushExpressionEvaluationContext(Sema::PotentiallyEvaluated); - if (Invalid) { - getSema().ActOnLambdaError(E->getLocStart(), /*CurScope=*/nullptr, - /*IsInstantiation=*/true); - return ExprError(); - } - // Instantiate the body of the lambda expression. - StmtResult Body = getDerived().TransformStmt(E->getBody()); + StmtResult Body = + Invalid ? StmtError() : getDerived().TransformStmt(E->getBody()); + + // ActOnLambda* will pop the function scope for us. + FuncScopeCleanup.disable(); + if (Body.isInvalid()) { + SavedContext.pop(); getSema().ActOnLambdaError(E->getLocStart(), /*CurScope=*/nullptr, /*IsInstantiation=*/true); return ExprError(); } - return getSema().ActOnLambdaExpr(E->getLocStart(), Body.get(), - /*CurScope=*/nullptr, - /*IsInstantiation=*/true); + // Copy the LSI before ActOnFinishFunctionBody removes it. + // FIXME: This is dumb. Store the lambda information somewhere that outlives + // the call operator. 
+ auto LSICopy = *LSI; + getSema().ActOnFinishFunctionBody(NewCallOperator, Body.get(), + /*IsInstantiation*/ true); + SavedContext.pop(); + + return getSema().BuildLambdaExpr(E->getLocStart(), Body.get()->getLocEnd(), + &LSICopy); } template<typename Derived> @@ -10042,6 +10071,19 @@ TreeTransform<Derived>::TransformObjCMessageExpr(ObjCMessageExpr *E) { Args, E->getRightLoc()); } + else if (E->getReceiverKind() == ObjCMessageExpr::SuperClass || + E->getReceiverKind() == ObjCMessageExpr::SuperInstance) { + // Build a new class message send to 'super'. + SmallVector<SourceLocation, 16> SelLocs; + E->getSelectorLocs(SelLocs); + return getDerived().RebuildObjCMessageExpr(E->getSuperLoc(), + E->getSelector(), + SelLocs, + E->getMethodDecl(), + E->getLeftLoc(), + Args, + E->getRightLoc()); + } // Instance message: transform the receiver assert(E->getReceiverKind() == ObjCMessageExpr::Instance && @@ -10720,11 +10762,9 @@ TreeTransform<Derived>::RebuildCXXPseudoDestructorExpr(Expr *Base, !BaseType->getAs<PointerType>()->getPointeeType() ->template getAs<RecordType>())){ // This pseudo-destructor expression is still a pseudo-destructor. - return SemaRef.BuildPseudoDestructorExpr(Base, OperatorLoc, - isArrow? tok::arrow : tok::period, - SS, ScopeType, CCLoc, TildeLoc, - Destroyed, - /*FIXME?*/true); + return SemaRef.BuildPseudoDestructorExpr( + Base, OperatorLoc, isArrow ? tok::arrow : tok::period, SS, ScopeType, + CCLoc, TildeLoc, Destroyed); } TypeSourceInfo *DestroyedType = Destroyed.getTypeSourceInfo(); diff --git a/contrib/llvm/tools/clang/lib/Serialization/ASTCommon.cpp b/contrib/llvm/tools/clang/lib/Serialization/ASTCommon.cpp index 1339322..85c574c 100644 --- a/contrib/llvm/tools/clang/lib/Serialization/ASTCommon.cpp +++ b/contrib/llvm/tools/clang/lib/Serialization/ASTCommon.cpp @@ -94,6 +94,7 @@ serialization::getDefinitiveDeclContext(const DeclContext *DC) { switch (DC->getDeclKind()) { // These entities may have multiple definitions. case Decl::TranslationUnit: + case Decl::ExternCContext: case Decl::Namespace: case Decl::LinkageSpec: return nullptr; @@ -149,7 +150,11 @@ serialization::getDefinitiveDeclContext(const DeclContext *DC) { bool serialization::isRedeclarableDeclKind(unsigned Kind) { switch (static_cast<Decl::Kind>(Kind)) { - case Decl::TranslationUnit: // Special case of a "merged" declaration. + case Decl::TranslationUnit: + case Decl::ExternCContext: + // Special case of a "merged" declaration. + return true; + case Decl::Namespace: case Decl::NamespaceAlias: case Decl::Typedef: @@ -223,6 +228,24 @@ bool serialization::isRedeclarableDeclKind(unsigned Kind) { } bool serialization::needsAnonymousDeclarationNumber(const NamedDecl *D) { + // Friend declarations in dependent contexts aren't anonymous in the usual + // sense, but they cannot be found by name lookup in their semantic context + // (or indeed in any context), so we treat them as anonymous. + // + // This doesn't apply to friend tag decls; Sema makes those available to name + // lookup in the surrounding context. + if (D->getFriendObjectKind() && + D->getLexicalDeclContext()->isDependentContext() && !isa<TagDecl>(D)) { + // For function templates and class templates, the template is numbered and + // not its pattern. + if (auto *FD = dyn_cast<FunctionDecl>(D)) + return !FD->getDescribedFunctionTemplate(); + if (auto *RD = dyn_cast<CXXRecordDecl>(D)) + return !RD->getDescribedClassTemplate(); + return true; + } + + // Otherwise, we only care about anonymous class members. 
if (D->getDeclName() || !isa<CXXRecordDecl>(D->getLexicalDeclContext())) return false; return isa<TagDecl>(D) || isa<FieldDecl>(D); diff --git a/contrib/llvm/tools/clang/lib/Serialization/ASTCommon.h b/contrib/llvm/tools/clang/lib/Serialization/ASTCommon.h index 38a0ff5..79d1817 100644 --- a/contrib/llvm/tools/clang/lib/Serialization/ASTCommon.h +++ b/contrib/llvm/tools/clang/lib/Serialization/ASTCommon.h @@ -15,6 +15,7 @@ #define LLVM_CLANG_LIB_SERIALIZATION_ASTCOMMON_H #include "clang/AST/ASTContext.h" +#include "clang/AST/DeclFriend.h" #include "clang/Serialization/ASTBitCodes.h" namespace clang { @@ -28,12 +29,14 @@ enum DeclUpdateKind { UPD_CXX_ADDED_FUNCTION_DEFINITION, UPD_CXX_INSTANTIATED_STATIC_DATA_MEMBER, UPD_CXX_INSTANTIATED_CLASS_DEFINITION, + UPD_CXX_RESOLVED_DTOR_DELETE, UPD_CXX_RESOLVED_EXCEPTION_SPEC, UPD_CXX_DEDUCED_RETURN_TYPE, UPD_DECL_MARKED_USED, UPD_MANGLING_NUMBER, UPD_STATIC_LOCAL_NUMBER, - UPD_DECL_MARKED_OPENMP_THREADPRIVATE + UPD_DECL_MARKED_OPENMP_THREADPRIVATE, + UPD_DECL_EXPORTED }; TypeIdx TypeIdxFromBuiltin(const BuiltinType *BT); @@ -85,6 +88,24 @@ bool isRedeclarableDeclKind(unsigned Kind); /// declaration number. bool needsAnonymousDeclarationNumber(const NamedDecl *D); +/// \brief Visit each declaration within \c DC that needs an anonymous +/// declaration number and call \p Visit with the declaration and its number. +template<typename Fn> void numberAnonymousDeclsWithin(const DeclContext *DC, + Fn Visit) { + unsigned Index = 0; + for (Decl *LexicalD : DC->decls()) { + // For a friend decl, we care about the declaration within it, if any. + if (auto *FD = dyn_cast<FriendDecl>(LexicalD)) + LexicalD = FD->getFriendDecl(); + + auto *ND = dyn_cast_or_null<NamedDecl>(LexicalD); + if (!ND || !needsAnonymousDeclarationNumber(ND)) + continue; + + Visit(ND, Index++); + } +} + } // namespace serialization } // namespace clang diff --git a/contrib/llvm/tools/clang/lib/Serialization/ASTReader.cpp b/contrib/llvm/tools/clang/lib/Serialization/ASTReader.cpp index 416164e..5e3a827 100644 --- a/contrib/llvm/tools/clang/lib/Serialization/ASTReader.cpp +++ b/contrib/llvm/tools/clang/lib/Serialization/ASTReader.cpp @@ -1,4 +1,4 @@ -//===--- ASTReader.cpp - AST File Reader ----------------------------------===// +//===-- ASTReader.cpp - AST File Reader ----------------------------------===// // // The LLVM Compiler Infrastructure // @@ -89,11 +89,13 @@ ChainedASTReaderListener::ReadLanguageOptions(const LangOptions &LangOpts, Second->ReadLanguageOptions(LangOpts, Complain, AllowCompatibleDifferences); } -bool -ChainedASTReaderListener::ReadTargetOptions(const TargetOptions &TargetOpts, - bool Complain) { - return First->ReadTargetOptions(TargetOpts, Complain) || - Second->ReadTargetOptions(TargetOpts, Complain); +bool ChainedASTReaderListener::ReadTargetOptions( + const TargetOptions &TargetOpts, bool Complain, + bool AllowCompatibleDifferences) { + return First->ReadTargetOptions(TargetOpts, Complain, + AllowCompatibleDifferences) || + Second->ReadTargetOptions(TargetOpts, Complain, + AllowCompatibleDifferences); } bool ChainedASTReaderListener::ReadDiagnosticOptions( IntrusiveRefCntPtr<DiagnosticOptions> DiagOpts, bool Complain) { @@ -108,9 +110,12 @@ ChainedASTReaderListener::ReadFileSystemOptions(const FileSystemOptions &FSOpts, } bool ChainedASTReaderListener::ReadHeaderSearchOptions( - const HeaderSearchOptions &HSOpts, bool Complain) { - return First->ReadHeaderSearchOptions(HSOpts, Complain) || - Second->ReadHeaderSearchOptions(HSOpts, Complain); + const 
HeaderSearchOptions &HSOpts, StringRef SpecificModuleCachePath, + bool Complain) { + return First->ReadHeaderSearchOptions(HSOpts, SpecificModuleCachePath, + Complain) || + Second->ReadHeaderSearchOptions(HSOpts, SpecificModuleCachePath, + Complain); } bool ChainedASTReaderListener::ReadPreprocessorOptions( const PreprocessorOptions &PPOpts, bool Complain, @@ -229,7 +234,8 @@ static bool checkLanguageOptions(const LangOptions &LangOpts, /// \returns true if the target options mis-match, false otherwise. static bool checkTargetOptions(const TargetOptions &TargetOpts, const TargetOptions &ExistingTargetOpts, - DiagnosticsEngine *Diags) { + DiagnosticsEngine *Diags, + bool AllowCompatibleDifferences = true) { #define CHECK_TARGET_OPT(Field, Name) \ if (TargetOpts.Field != ExistingTargetOpts.Field) { \ if (Diags) \ @@ -238,9 +244,16 @@ static bool checkTargetOptions(const TargetOptions &TargetOpts, return true; \ } + // The triple and ABI must match exactly. CHECK_TARGET_OPT(Triple, "target"); - CHECK_TARGET_OPT(CPU, "target CPU"); CHECK_TARGET_OPT(ABI, "target ABI"); + + // We can tolerate different CPUs in many cases, notably when one CPU + // supports a strict superset of another. When allowing compatible + // differences skip this check. + if (!AllowCompatibleDifferences) + CHECK_TARGET_OPT(CPU, "target CPU"); + #undef CHECK_TARGET_OPT // Compare feature sets. @@ -252,43 +265,31 @@ static bool checkTargetOptions(const TargetOptions &TargetOpts, std::sort(ExistingFeatures.begin(), ExistingFeatures.end()); std::sort(ReadFeatures.begin(), ReadFeatures.end()); - unsigned ExistingIdx = 0, ExistingN = ExistingFeatures.size(); - unsigned ReadIdx = 0, ReadN = ReadFeatures.size(); - while (ExistingIdx < ExistingN && ReadIdx < ReadN) { - if (ExistingFeatures[ExistingIdx] == ReadFeatures[ReadIdx]) { - ++ExistingIdx; - ++ReadIdx; - continue; - } - - if (ReadFeatures[ReadIdx] < ExistingFeatures[ExistingIdx]) { - if (Diags) - Diags->Report(diag::err_pch_targetopt_feature_mismatch) - << false << ReadFeatures[ReadIdx]; - return true; - } - - if (Diags) - Diags->Report(diag::err_pch_targetopt_feature_mismatch) - << true << ExistingFeatures[ExistingIdx]; - return true; - } + // We compute the set difference in both directions explicitly so that we can + // diagnose the differences differently. + SmallVector<StringRef, 4> UnmatchedExistingFeatures, UnmatchedReadFeatures; + std::set_difference( + ExistingFeatures.begin(), ExistingFeatures.end(), ReadFeatures.begin(), + ReadFeatures.end(), std::back_inserter(UnmatchedExistingFeatures)); + std::set_difference(ReadFeatures.begin(), ReadFeatures.end(), + ExistingFeatures.begin(), ExistingFeatures.end(), + std::back_inserter(UnmatchedReadFeatures)); + + // If we are allowing compatible differences and the read feature set is + // a strict subset of the existing feature set, there is nothing to diagnose. 
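
Note: the rewritten feature check above computes the set difference in both directions over the sorted feature lists, so each direction can be diagnosed separately and a read set that is a strict subset can be tolerated. A standalone sketch of the same idiom, with made-up feature names rather than clang's TargetOptions:

#include <algorithm>
#include <iostream>
#include <iterator>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> Existing = {"avx", "sse2", "sse4.1"};
  std::vector<std::string> Read = {"neon", "sse2"};
  // std::set_difference requires sorted input ranges.
  std::sort(Existing.begin(), Existing.end());
  std::sort(Read.begin(), Read.end());

  std::vector<std::string> OnlyExisting, OnlyRead;
  std::set_difference(Existing.begin(), Existing.end(), Read.begin(),
                      Read.end(), std::back_inserter(OnlyExisting));
  std::set_difference(Read.begin(), Read.end(), Existing.begin(),
                      Existing.end(), std::back_inserter(OnlyRead));

  // Mirror of the logic above: when compatible differences are allowed,
  // an empty OnlyRead means the read set is a subset and is acceptable.
  bool AllowCompatibleDifferences = true;
  if (AllowCompatibleDifferences && OnlyRead.empty()) {
    std::cout << "feature sets compatible\n";
    return 0;
  }
  for (const auto &F : OnlyRead)
    std::cout << "feature only in the AST file: " << F << "\n";
  for (const auto &F : OnlyExisting)
    std::cout << "feature only in the current target: " << F << "\n";
  return 1;
}
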
+ if (AllowCompatibleDifferences && UnmatchedReadFeatures.empty()) + return false; - if (ExistingIdx < ExistingN) { - if (Diags) + if (Diags) { + for (StringRef Feature : UnmatchedReadFeatures) Diags->Report(diag::err_pch_targetopt_feature_mismatch) - << true << ExistingFeatures[ExistingIdx]; - return true; - } - - if (ReadIdx < ReadN) { - if (Diags) + << /* is-existing-feature */ false << Feature; + for (StringRef Feature : UnmatchedExistingFeatures) Diags->Report(diag::err_pch_targetopt_feature_mismatch) - << false << ReadFeatures[ReadIdx]; - return true; + << /* is-existing-feature */ true << Feature; } - return false; + return !UnmatchedReadFeatures.empty() || !UnmatchedExistingFeatures.empty(); } bool @@ -302,10 +303,12 @@ PCHValidator::ReadLanguageOptions(const LangOptions &LangOpts, } bool PCHValidator::ReadTargetOptions(const TargetOptions &TargetOpts, - bool Complain) { + bool Complain, + bool AllowCompatibleDifferences) { const TargetOptions &ExistingTargetOpts = PP.getTargetInfo().getTargetOpts(); return checkTargetOptions(TargetOpts, ExistingTargetOpts, - Complain? &Reader.Diags : nullptr); + Complain ? &Reader.Diags : nullptr, + AllowCompatibleDifferences); } namespace { @@ -464,7 +467,7 @@ collectMacroDefinitions(const PreprocessorOptions &PPOpts, Macros[MacroName] = std::make_pair(MacroBody, false); } } - + /// \brief Check the preprocessor options deserialized from the control block /// against the preprocessor options in an existing preprocessor. /// @@ -591,6 +594,36 @@ bool PCHValidator::ReadPreprocessorOptions(const PreprocessorOptions &PPOpts, PP.getLangOpts()); } +/// Check the header search options deserialized from the control block +/// against the header search options in an existing preprocessor. +/// +/// \param Diags If non-null, produce diagnostics for any mismatches incurred. +static bool checkHeaderSearchOptions(const HeaderSearchOptions &HSOpts, + StringRef SpecificModuleCachePath, + StringRef ExistingModuleCachePath, + DiagnosticsEngine *Diags, + const LangOptions &LangOpts) { + if (LangOpts.Modules) { + if (SpecificModuleCachePath != ExistingModuleCachePath) { + if (Diags) + Diags->Report(diag::err_pch_modulecache_mismatch) + << SpecificModuleCachePath << ExistingModuleCachePath; + return true; + } + } + + return false; +} + +bool PCHValidator::ReadHeaderSearchOptions(const HeaderSearchOptions &HSOpts, + StringRef SpecificModuleCachePath, + bool Complain) { + return checkHeaderSearchOptions(HSOpts, SpecificModuleCachePath, + PP.getHeaderSearchInfo().getModuleCachePath(), + Complain ? 
&Reader.Diags : nullptr, + PP.getLangOpts()); +} + void PCHValidator::ReadCounter(const ModuleFile &M, unsigned Value) { PP.setCounterValue(Value); } @@ -744,8 +777,6 @@ IdentifierInfo *ASTIdentifierLookupTrait::ReadData(const internal_key_type& k, Bits >>= 1; bool ExtensionToken = Bits & 0x01; Bits >>= 1; - bool hasSubmoduleMacros = Bits & 0x01; - Bits >>= 1; bool hadMacroDefinition = Bits & 0x01; Bits >>= 1; @@ -787,49 +818,8 @@ IdentifierInfo *ASTIdentifierLookupTrait::ReadData(const internal_key_type& k, uint32_t MacroDirectivesOffset = endian::readNext<uint32_t, little, unaligned>(d); DataLen -= 4; - SmallVector<uint32_t, 8> LocalMacroIDs; - if (hasSubmoduleMacros) { - while (true) { - uint32_t LocalMacroID = - endian::readNext<uint32_t, little, unaligned>(d); - DataLen -= 4; - if (LocalMacroID == 0xdeadbeef) break; - LocalMacroIDs.push_back(LocalMacroID); - } - } - - if (F.Kind == MK_ImplicitModule || F.Kind == MK_ExplicitModule) { - // Macro definitions are stored from newest to oldest, so reverse them - // before registering them. - llvm::SmallVector<unsigned, 8> MacroSizes; - for (SmallVectorImpl<uint32_t>::iterator - I = LocalMacroIDs.begin(), E = LocalMacroIDs.end(); I != E; /**/) { - unsigned Size = 1; - - static const uint32_t HasOverridesFlag = 0x80000000U; - if (I + 1 != E && (I[1] & HasOverridesFlag)) - Size += 1 + (I[1] & ~HasOverridesFlag); - MacroSizes.push_back(Size); - I += Size; - } - - SmallVectorImpl<uint32_t>::iterator I = LocalMacroIDs.end(); - for (SmallVectorImpl<unsigned>::reverse_iterator SI = MacroSizes.rbegin(), - SE = MacroSizes.rend(); - SI != SE; ++SI) { - I -= *SI; - - uint32_t LocalMacroID = *I; - ArrayRef<uint32_t> Overrides; - if (*SI != 1) - Overrides = llvm::makeArrayRef(&I[2], *SI - 2); - Reader.addPendingMacroFromModule(II, &F, LocalMacroID, Overrides); - } - assert(I == LocalMacroIDs.begin()); - } else { - Reader.addPendingMacroFromPCH(II, &F, MacroDirectivesOffset); - } + Reader.addPendingMacro(II, &F, MacroDirectivesOffset); } Reader.SetIdentifierInfo(ID, II); @@ -1393,6 +1383,7 @@ MacroInfo *ASTReader::ReadMacroRecord(ModuleFile &F, uint64_t Offset) { PreprocessorRecordTypes RecType = (PreprocessorRecordTypes)Stream.readRecord(Entry.ID, Record); switch (RecType) { + case PP_MODULE_MACRO: case PP_MACRO_DIRECTIVE_HISTORY: return Macro; @@ -1441,10 +1432,10 @@ MacroInfo *ASTReader::ReadMacroRecord(ModuleFile &F, uint64_t Offset) { PreprocessedEntityID GlobalID = getGlobalPreprocessedEntityID(F, Record[NextIndex]); PreprocessingRecord &PPRec = *PP.getPreprocessingRecord(); - PreprocessingRecord::PPEntityID - PPID = PPRec.getPPEntityID(GlobalID-1, /*isLoaded=*/true); - MacroDefinition *PPDef = - cast_or_null<MacroDefinition>(PPRec.getPreprocessedEntity(PPID)); + PreprocessingRecord::PPEntityID PPID = + PPRec.getPPEntityID(GlobalID - 1, /*isLoaded=*/true); + MacroDefinitionRecord *PPDef = cast_or_null<MacroDefinitionRecord>( + PPRec.getPreprocessedEntity(PPID)); if (PPDef) PPRec.RegisterMacroDefinition(Macro, PPDef); } @@ -1586,24 +1577,9 @@ HeaderFileInfoTrait::ReadData(internal_key_ref key, const unsigned char *d, return HFI; } -void -ASTReader::addPendingMacroFromModule(IdentifierInfo *II, ModuleFile *M, - GlobalMacroID GMacID, - ArrayRef<SubmoduleID> Overrides) { - assert(NumCurrentElementsDeserializing > 0 &&"Missing deserialization guard"); - SubmoduleID *OverrideData = nullptr; - if (!Overrides.empty()) { - OverrideData = new (Context) SubmoduleID[Overrides.size() + 1]; - OverrideData[0] = Overrides.size(); - for (unsigned I = 0; I != 
Overrides.size(); ++I) - OverrideData[I + 1] = getGlobalSubmoduleID(*M, Overrides[I]); - } - PendingMacroIDs[II].push_back(PendingMacroInfo(M, GMacID, OverrideData)); -} - -void ASTReader::addPendingMacroFromPCH(IdentifierInfo *II, - ModuleFile *M, - uint64_t MacroDirectivesOffset) { +void ASTReader::addPendingMacro(IdentifierInfo *II, + ModuleFile *M, + uint64_t MacroDirectivesOffset) { assert(NumCurrentElementsDeserializing > 0 &&"Missing deserialization guard"); PendingMacroIDs[II].push_back(PendingMacroInfo(M, MacroDirectivesOffset)); } @@ -1747,110 +1723,81 @@ void ASTReader::markIdentifierUpToDate(IdentifierInfo *II) { IdentifierGeneration[II] = getGeneration(); } -struct ASTReader::ModuleMacroInfo { - SubmoduleID SubModID; - MacroInfo *MI; - SubmoduleID *Overrides; - // FIXME: Remove this. - ModuleFile *F; - - bool isDefine() const { return MI; } - - SubmoduleID getSubmoduleID() const { return SubModID; } - - ArrayRef<SubmoduleID> getOverriddenSubmodules() const { - if (!Overrides) - return None; - return llvm::makeArrayRef(Overrides + 1, *Overrides); - } - - MacroDirective *import(Preprocessor &PP, SourceLocation ImportLoc) const { - if (!MI) - return PP.AllocateUndefMacroDirective(ImportLoc, SubModID, - getOverriddenSubmodules()); - return PP.AllocateDefMacroDirective(MI, ImportLoc, SubModID, - getOverriddenSubmodules()); - } -}; - -ASTReader::ModuleMacroInfo * -ASTReader::getModuleMacro(const PendingMacroInfo &PMInfo) { - ModuleMacroInfo Info; - - uint32_t ID = PMInfo.ModuleMacroData.MacID; - if (ID & 1) { - // Macro undefinition. - Info.SubModID = getGlobalSubmoduleID(*PMInfo.M, ID >> 1); - Info.MI = nullptr; - } else { - // Macro definition. - GlobalMacroID GMacID = getGlobalMacroID(*PMInfo.M, ID >> 1); - assert(GMacID); - - // If this macro has already been loaded, don't do so again. - // FIXME: This is highly dubious. Multiple macro definitions can have the - // same MacroInfo (and hence the same GMacID) due to #pragma push_macro etc. - if (MacrosLoaded[GMacID - NUM_PREDEF_MACRO_IDS]) - return nullptr; +void ASTReader::resolvePendingMacro(IdentifierInfo *II, + const PendingMacroInfo &PMInfo) { + ModuleFile &M = *PMInfo.M; - Info.MI = getMacro(GMacID); - Info.SubModID = Info.MI->getOwningModuleID(); - } - Info.Overrides = PMInfo.ModuleMacroData.Overrides; - Info.F = PMInfo.M; + BitstreamCursor &Cursor = M.MacroCursor; + SavedStreamPosition SavedPosition(Cursor); + Cursor.JumpToBit(PMInfo.MacroDirectivesOffset); - return new (Context) ModuleMacroInfo(Info); -} + struct ModuleMacroRecord { + SubmoduleID SubModID; + MacroInfo *MI; + SmallVector<SubmoduleID, 8> Overrides; + }; + llvm::SmallVector<ModuleMacroRecord, 8> ModuleMacros; -void ASTReader::resolvePendingMacro(IdentifierInfo *II, - const PendingMacroInfo &PMInfo) { - assert(II); + // We expect to see a sequence of PP_MODULE_MACRO records listing exported + // macros, followed by a PP_MACRO_DIRECTIVE_HISTORY record with the complete + // macro history. + RecordData Record; + while (true) { + llvm::BitstreamEntry Entry = + Cursor.advance(BitstreamCursor::AF_DontPopBlockAtEnd); + if (Entry.Kind != llvm::BitstreamEntry::Record) { + Error("malformed block record in AST file"); + return; + } - if (PMInfo.M->Kind != MK_ImplicitModule && - PMInfo.M->Kind != MK_ExplicitModule) { - installPCHMacroDirectives(II, *PMInfo.M, - PMInfo.PCHMacroData.MacroDirectivesOffset); - return; - } + Record.clear(); + switch ((PreprocessorRecordTypes)Cursor.readRecord(Entry.ID, Record)) { + case PP_MACRO_DIRECTIVE_HISTORY: + break; - // Module Macro. 
+ case PP_MODULE_MACRO: { + ModuleMacros.push_back(ModuleMacroRecord()); + auto &Info = ModuleMacros.back(); + Info.SubModID = getGlobalSubmoduleID(M, Record[0]); + Info.MI = getMacro(getGlobalMacroID(M, Record[1])); + for (int I = 2, N = Record.size(); I != N; ++I) + Info.Overrides.push_back(getGlobalSubmoduleID(M, Record[I])); + continue; + } - ModuleMacroInfo *MMI = getModuleMacro(PMInfo); - if (!MMI) - return; + default: + Error("malformed block record in AST file"); + return; + } - Module *Owner = getSubmodule(MMI->getSubmoduleID()); - if (Owner && Owner->NameVisibility == Module::Hidden) { - // Macros in the owning module are hidden. Just remember this macro to - // install if we make this module visible. - HiddenNamesMap[Owner].HiddenMacros.insert(std::make_pair(II, MMI)); - } else { - installImportedMacro(II, MMI, Owner); + // We found the macro directive history; that's the last record + // for this macro. + break; } -} - -void ASTReader::installPCHMacroDirectives(IdentifierInfo *II, - ModuleFile &M, uint64_t Offset) { - assert(M.Kind != MK_ImplicitModule && M.Kind != MK_ExplicitModule); - BitstreamCursor &Cursor = M.MacroCursor; - SavedStreamPosition SavedPosition(Cursor); - Cursor.JumpToBit(Offset); + // Module macros are listed in reverse dependency order. + { + std::reverse(ModuleMacros.begin(), ModuleMacros.end()); + llvm::SmallVector<ModuleMacro*, 8> Overrides; + for (auto &MMR : ModuleMacros) { + Overrides.clear(); + for (unsigned ModID : MMR.Overrides) { + Module *Mod = getSubmodule(ModID); + auto *Macro = PP.getModuleMacro(Mod, II); + assert(Macro && "missing definition for overridden macro"); + Overrides.push_back(Macro); + } - llvm::BitstreamEntry Entry = - Cursor.advance(BitstreamCursor::AF_DontPopBlockAtEnd); - if (Entry.Kind != llvm::BitstreamEntry::Record) { - Error("malformed block record in AST file"); - return; + bool Inserted = false; + Module *Owner = getSubmodule(MMR.SubModID); + PP.addModuleMacro(Owner, II, MMR.MI, Overrides, Inserted); + } } - RecordData Record; - PreprocessorRecordTypes RecType = - (PreprocessorRecordTypes)Cursor.readRecord(Entry.ID, Record); - if (RecType != PP_MACRO_DIRECTIVE_HISTORY) { - Error("malformed block record in AST file"); + // Don't read the directive history for a module; we don't have anywhere + // to put it. + if (M.Kind == MK_ImplicitModule || M.Kind == MK_ExplicitModule) return; - } // Deserialize the macro directives history in reverse source-order. 
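
Note: resolvePendingMacro above collects the PP_MODULE_MACRO records, which are serialized newest-first, and reverses them so that every macro a record overrides has already been registered by the time the record itself is added. A small sketch of that ordering invariant, with hypothetical types standing in for clang's ModuleMacro machinery:

#include <algorithm>
#include <cassert>
#include <map>
#include <string>
#include <vector>

struct MacroRec {
  std::string OwningModule;
  std::vector<std::string> Overrides; // modules whose macro this overrides
};

void registerModuleMacros(std::vector<MacroRec> Records /*newest first*/) {
  // Reverse so dependencies (overridden macros) are registered first.
  std::reverse(Records.begin(), Records.end());

  std::map<std::string, const MacroRec *> Registered;
  for (const auto &R : Records) {
    for (const auto &Overridden : R.Overrides)
      // Parallels the "missing definition for overridden macro" assert.
      assert(Registered.count(Overridden) && "override precedes its target");
    Registered[R.OwningModule] = &R;
  }
}

int main() {
  // Module B overrides A's macro; the records arrive newest-first.
  registerModuleMacros({{"B", {"A"}}, {"A", {}}});
}
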
MacroDirective *Latest = nullptr, *Earliest = nullptr; @@ -1861,31 +1808,12 @@ void ASTReader::installPCHMacroDirectives(IdentifierInfo *II, MacroDirective::Kind K = (MacroDirective::Kind)Record[Idx++]; switch (K) { case MacroDirective::MD_Define: { - GlobalMacroID GMacID = getGlobalMacroID(M, Record[Idx++]); - MacroInfo *MI = getMacro(GMacID); - SubmoduleID ImportedFrom = Record[Idx++]; - bool IsAmbiguous = Record[Idx++]; - llvm::SmallVector<unsigned, 4> Overrides; - if (ImportedFrom) { - Overrides.insert(Overrides.end(), - &Record[Idx] + 1, &Record[Idx] + 1 + Record[Idx]); - Idx += Overrides.size() + 1; - } - DefMacroDirective *DefMD = - PP.AllocateDefMacroDirective(MI, Loc, ImportedFrom, Overrides); - DefMD->setAmbiguous(IsAmbiguous); - MD = DefMD; + MacroInfo *MI = getMacro(getGlobalMacroID(M, Record[Idx++])); + MD = PP.AllocateDefMacroDirective(MI, Loc); break; } case MacroDirective::MD_Undefine: { - SubmoduleID ImportedFrom = Record[Idx++]; - llvm::SmallVector<unsigned, 4> Overrides; - if (ImportedFrom) { - Overrides.insert(Overrides.end(), - &Record[Idx] + 1, &Record[Idx] + 1 + Record[Idx]); - Idx += Overrides.size() + 1; - } - MD = PP.AllocateUndefMacroDirective(Loc, ImportedFrom, Overrides); + MD = PP.AllocateUndefMacroDirective(Loc); break; } case MacroDirective::MD_Visibility: @@ -1901,177 +1829,8 @@ void ASTReader::installPCHMacroDirectives(IdentifierInfo *II, Earliest = MD; } - PP.setLoadedMacroDirective(II, Latest); -} - -/// \brief For the given macro definitions, check if they are both in system -/// modules. -static bool areDefinedInSystemModules(MacroInfo *PrevMI, MacroInfo *NewMI, - Module *NewOwner, ASTReader &Reader) { - assert(PrevMI && NewMI); - Module *PrevOwner = nullptr; - if (SubmoduleID PrevModID = PrevMI->getOwningModuleID()) - PrevOwner = Reader.getSubmodule(PrevModID); - SourceManager &SrcMgr = Reader.getSourceManager(); - bool PrevInSystem - = PrevOwner? PrevOwner->IsSystem - : SrcMgr.isInSystemHeader(PrevMI->getDefinitionLoc()); - bool NewInSystem - = NewOwner? NewOwner->IsSystem - : SrcMgr.isInSystemHeader(NewMI->getDefinitionLoc()); - if (PrevOwner && PrevOwner == NewOwner) - return false; - return PrevInSystem && NewInSystem; -} - -void ASTReader::removeOverriddenMacros(IdentifierInfo *II, - SourceLocation ImportLoc, - AmbiguousMacros &Ambig, - ArrayRef<SubmoduleID> Overrides) { - for (unsigned OI = 0, ON = Overrides.size(); OI != ON; ++OI) { - SubmoduleID OwnerID = Overrides[OI]; - - // If this macro is not yet visible, remove it from the hidden names list. - // It won't be there if we're in the middle of making the owner visible. - Module *Owner = getSubmodule(OwnerID); - auto HiddenIt = HiddenNamesMap.find(Owner); - if (HiddenIt != HiddenNamesMap.end()) { - HiddenNames &Hidden = HiddenIt->second; - HiddenMacrosMap::iterator HI = Hidden.HiddenMacros.find(II); - if (HI != Hidden.HiddenMacros.end()) { - // Register the macro now so we don't lose it when we re-export. - PP.appendMacroDirective(II, HI->second->import(PP, ImportLoc)); - - auto SubOverrides = HI->second->getOverriddenSubmodules(); - Hidden.HiddenMacros.erase(HI); - removeOverriddenMacros(II, ImportLoc, Ambig, SubOverrides); - } - } - - // If this macro is already in our list of conflicts, remove it from there. 
- Ambig.erase( - std::remove_if(Ambig.begin(), Ambig.end(), [&](DefMacroDirective *MD) { - return MD->getInfo()->getOwningModuleID() == OwnerID; - }), - Ambig.end()); - } -} - -ASTReader::AmbiguousMacros * -ASTReader::removeOverriddenMacros(IdentifierInfo *II, - SourceLocation ImportLoc, - ArrayRef<SubmoduleID> Overrides) { - MacroDirective *Prev = PP.getMacroDirective(II); - if (!Prev && Overrides.empty()) - return nullptr; - - DefMacroDirective *PrevDef = Prev ? Prev->getDefinition().getDirective() - : nullptr; - if (PrevDef && PrevDef->isAmbiguous()) { - // We had a prior ambiguity. Check whether we resolve it (or make it worse). - AmbiguousMacros &Ambig = AmbiguousMacroDefs[II]; - Ambig.push_back(PrevDef); - - removeOverriddenMacros(II, ImportLoc, Ambig, Overrides); - - if (!Ambig.empty()) - return &Ambig; - - AmbiguousMacroDefs.erase(II); - } else { - // There's no ambiguity yet. Maybe we're introducing one. - AmbiguousMacros Ambig; - if (PrevDef) - Ambig.push_back(PrevDef); - - removeOverriddenMacros(II, ImportLoc, Ambig, Overrides); - - if (!Ambig.empty()) { - AmbiguousMacros &Result = AmbiguousMacroDefs[II]; - std::swap(Result, Ambig); - return &Result; - } - } - - // We ended up with no ambiguity. - return nullptr; -} - -void ASTReader::installImportedMacro(IdentifierInfo *II, ModuleMacroInfo *MMI, - Module *Owner) { - assert(II && Owner); - - SourceLocation ImportLoc = Owner->MacroVisibilityLoc; - if (ImportLoc.isInvalid()) { - // FIXME: If we made macros from this module visible but didn't provide a - // source location for the import, we don't have a location for the macro. - // Use the location at which the containing module file was first imported - // for now. - ImportLoc = MMI->F->DirectImportLoc; - assert(ImportLoc.isValid() && "no import location for a visible macro?"); - } - - AmbiguousMacros *Prev = - removeOverriddenMacros(II, ImportLoc, MMI->getOverriddenSubmodules()); - - // Create a synthetic macro definition corresponding to the import (or null - // if this was an undefinition of the macro). - MacroDirective *Imported = MMI->import(PP, ImportLoc); - DefMacroDirective *MD = dyn_cast<DefMacroDirective>(Imported); - - // If there's no ambiguity, just install the macro. - if (!Prev) { - PP.appendMacroDirective(II, Imported); - return; - } - assert(!Prev->empty()); - - if (!MD) { - // We imported a #undef that didn't remove all prior definitions. The most - // recent prior definition remains, and we install it in the place of the - // imported directive, as if by a local #pragma pop_macro. - MacroInfo *NewMI = Prev->back()->getInfo(); - Prev->pop_back(); - MD = PP.AllocateDefMacroDirective(NewMI, ImportLoc); - - // Install our #undef first so that we don't lose track of it. We'll replace - // this with whichever macro definition ends up winning. - PP.appendMacroDirective(II, Imported); - } - - // We're introducing a macro definition that creates or adds to an ambiguity. - // We can resolve that ambiguity if this macro is token-for-token identical to - // all of the existing definitions. - MacroInfo *NewMI = MD->getInfo(); - assert(NewMI && "macro definition with no MacroInfo?"); - while (!Prev->empty()) { - MacroInfo *PrevMI = Prev->back()->getInfo(); - assert(PrevMI && "macro definition with no MacroInfo?"); - - // Before marking the macros as ambiguous, check if this is a case where - // both macros are in system headers. If so, we trust that the system - // did not get it wrong. 
This also handles cases where Clang's own - // headers have a different spelling of certain system macros: - // #define LONG_MAX __LONG_MAX__ (clang's limits.h) - // #define LONG_MAX 0x7fffffffffffffffL (system's limits.h) - // - // FIXME: Remove the defined-in-system-headers check. clang's limits.h - // overrides the system limits.h's macros, so there's no conflict here. - if (NewMI != PrevMI && - !PrevMI->isIdenticalTo(*NewMI, PP, /*Syntactically=*/true) && - !areDefinedInSystemModules(PrevMI, NewMI, Owner, *this)) - break; - - // The previous definition is the same as this one (or both are defined in - // system modules so we can assume they're equivalent); we don't need to - // track it any more. - Prev->pop_back(); - } - - if (!Prev->empty()) - MD->setAmbiguous(true); - - PP.appendMacroDirective(II, MD); + if (Latest) + PP.setLoadedMacroDirective(II, Latest); } ASTReader::InputFileInfo @@ -2426,6 +2185,9 @@ ASTReader::ReadControlBlock(ModuleFile &F, break; } + case KNOWN_MODULE_FILES: + break; + case LANGUAGE_OPTIONS: { bool Complain = (ClientLoadCapabilities & ARR_ConfigurationMismatch) == 0; // FIXME: The &F == *ModuleMgr.begin() check is wrong for modules. @@ -2440,7 +2202,8 @@ ASTReader::ReadControlBlock(ModuleFile &F, case TARGET_OPTIONS: { bool Complain = (ClientLoadCapabilities & ARR_ConfigurationMismatch)==0; if (Listener && &F == *ModuleMgr.begin() && - ParseTargetOptions(Record, Complain, *Listener) && + ParseTargetOptions(Record, Complain, *Listener, + AllowCompatibleConfigurationMismatch) && !DisableValidation && !AllowConfigurationMismatch) return ConfigurationMismatch; break; @@ -2543,7 +2306,7 @@ ASTReader::ReadControlBlock(ModuleFile &F, case INPUT_FILE_OFFSETS: NumInputs = Record[0]; NumUserInputs = Record[1]; - F.InputFileOffsets = (const uint32_t *)Blob.data(); + F.InputFileOffsets = (const uint64_t *)Blob.data(); F.InputFilesLoaded.resize(NumInputs); break; } @@ -2795,6 +2558,8 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) { } case EAGERLY_DESERIALIZED_DECLS: + // FIXME: Skip reading this record if our ASTConsumer doesn't care + // about "interesting" decls (for instance, if we're building a module). 
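
The INPUT_FILE_OFFSETS change above widens the on-disk offset table from uint32_t to uint64_t, letting input-file data past the 4 GiB mark be addressed; the reader then just reinterprets the record blob as an array of that type. A hedged sketch of reading such a packed table, using memcpy to sidestep the alignment assumptions a raw pointer cast makes:

#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

// Read the I-th fixed-width offset out of a raw blob, as the AST
// reader does for its input-file offset table.
static uint64_t readOffset(const std::vector<unsigned char> &Blob, size_t I) {
  uint64_t V;
  std::memcpy(&V, Blob.data() + I * sizeof(uint64_t), sizeof(V));
  return V; // assumes the blob was written with the host's endianness
}

int main() {
  std::vector<unsigned char> Blob(2 * sizeof(uint64_t));
  uint64_t Offsets[2] = {16, 5ull * 1024 * 1024 * 1024}; // >4 GiB needs 64 bits
  std::memcpy(Blob.data(), Offsets, sizeof(Offsets));
  for (size_t I = 0; I != 2; ++I)
    std::printf("offset[%zu] = %llu\n", I,
                (unsigned long long)readOffset(Blob, I));
}
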
for (unsigned I = 0, N = Record.size(); I != N; ++I) EagerlyDeserializedDecls.push_back(getGlobalDeclID(F, Record[I])); break; @@ -2859,11 +2624,6 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) { } break; - case LOCALLY_SCOPED_EXTERN_C_DECLS: - for (unsigned I = 0, N = Record.size(); I != N; ++I) - LocallyScopedExternCDecls.push_back(getGlobalDeclID(F, Record[I])); - break; - case SELECTOR_OFFSETS: { F.SelectorOffsets = (const uint32_t *)Blob.data(); F.LocalNumSelectors = Record[0]; @@ -3066,11 +2826,6 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) { } break; - case DYNAMIC_CLASSES: - for (unsigned I = 0, N = Record.size(); I != N; ++I) - DynamicClasses.push_back(getGlobalDeclID(F, Record[I])); - break; - case PENDING_IMPLICIT_INSTANTIATIONS: if (PendingInstantiations.size() % 2 != 0) { Error("Invalid existing PendingInstantiations"); @@ -3172,16 +2927,26 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) { case OBJC_CATEGORIES: F.ObjCCategories.swap(Record); break; - + case CXX_BASE_SPECIFIER_OFFSETS: { if (F.LocalNumCXXBaseSpecifiers != 0) { Error("duplicate CXX_BASE_SPECIFIER_OFFSETS record in AST file"); return Failure; } - + F.LocalNumCXXBaseSpecifiers = Record[0]; F.CXXBaseSpecifiersOffsets = (const uint32_t *)Blob.data(); - NumCXXBaseSpecifiersLoaded += F.LocalNumCXXBaseSpecifiers; + break; + } + + case CXX_CTOR_INITIALIZERS_OFFSETS: { + if (F.LocalNumCXXCtorInitializers != 0) { + Error("duplicate CXX_CTOR_INITIALIZERS_OFFSETS record in AST file"); + return Failure; + } + + F.LocalNumCXXCtorInitializers = Record[0]; + F.CXXCtorInitializersOffsets = (const uint32_t *)Blob.data(); break; } @@ -3256,6 +3021,18 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) { ReadSourceLocation(F, Record, I).getRawEncoding()); } break; + case DELETE_EXPRS_TO_ANALYZE: + for (unsigned I = 0, N = Record.size(); I != N;) { + DelayedDeleteExprs.push_back(getGlobalDeclID(F, Record[I++])); + const uint64_t Count = Record[I++]; + DelayedDeleteExprs.push_back(Count); + for (uint64_t C = 0; C < Count; ++C) { + DelayedDeleteExprs.push_back(ReadSourceLocation(F, Record, I).getRawEncoding()); + bool IsArrayForm = Record[I++] == 1; + DelayedDeleteExprs.push_back(IsArrayForm); + } + } + break; case IMPORTED_MODULES: { if (F.Kind != MK_ImplicitModule && F.Kind != MK_ExplicitModule) { @@ -3288,16 +3065,6 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) { break; } - case MERGED_DECLARATIONS: { - for (unsigned Idx = 0; Idx < Record.size(); /* increment in loop */) { - GlobalDeclID CanonID = getGlobalDeclID(F, Record[Idx++]); - SmallVectorImpl<GlobalDeclID> &Decls = StoredMergedDecls[CanonID]; - for (unsigned N = Record[Idx++]; N > 0; --N) - Decls.push_back(getGlobalDeclID(F, Record[Idx++])); - } - break; - } - case MACRO_OFFSET: { if (F.LocalNumMacros != 0) { Error("duplicate MACRO_OFFSET record in AST file"); @@ -3322,11 +3089,6 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) { break; } - case MACRO_TABLE: { - // FIXME: Not used yet. - break; - } - case LATE_PARSED_TEMPLATE: { LateParsedTemplates.append(Record.begin(), Record.end()); break; @@ -3472,10 +3234,9 @@ static void moveMethodToBackOfGlobalList(Sema &S, ObjCMethodDecl *Method) { } } -void ASTReader::makeNamesVisible(const HiddenNames &Names, Module *Owner, - bool FromFinalization) { - // FIXME: Only do this if Owner->NameVisibility == AllVisible. 
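
The DELETE_EXPRS_TO_ANALYZE case above decodes a flattened, variable-length record: a declaration ID, a pair count, then that many (source location, is-array-form) pairs. A small sketch of encoding and decoding this layout, with plain integers standing in for declaration IDs and source locations:

#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  // Encode: [DeclID, Count, (Loc, IsArrayForm) x Count] ...
  std::vector<uint64_t> Record = {/*DeclID*/ 7, /*Count*/ 2,
                                  /*Loc*/ 100, /*IsArrayForm*/ 1,
                                  /*Loc*/ 140, /*IsArrayForm*/ 0};

  // Decode, mirroring the reader's cursor-style index walk.
  for (size_t I = 0, N = Record.size(); I != N;) {
    uint64_t DeclID = Record[I++];
    uint64_t Count = Record[I++];
    for (uint64_t C = 0; C != Count; ++C) {
      uint64_t Loc = Record[I++];
      bool IsArrayForm = Record[I++] == 1;
      std::printf("field %llu: delete%s at %llu\n",
                  (unsigned long long)DeclID, IsArrayForm ? "[]" : "",
                  (unsigned long long)Loc);
    }
  }
}
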
- for (Decl *D : Names.HiddenDecls) { +void ASTReader::makeNamesVisible(const HiddenNames &Names, Module *Owner) { + assert(Owner->NameVisibility != Module::Hidden && "nothing to make visible?"); + for (Decl *D : Names) { bool wasHidden = D->Hidden; D->Hidden = false; @@ -3485,22 +3246,11 @@ void ASTReader::makeNamesVisible(const HiddenNames &Names, Module *Owner, } } } - - assert((FromFinalization || Owner->NameVisibility >= Module::MacrosVisible) && - "nothing to make visible?"); - for (const auto &Macro : Names.HiddenMacros) { - if (FromFinalization) - PP.appendMacroDirective(Macro.first, - Macro.second->import(PP, SourceLocation())); - else - installImportedMacro(Macro.first, Macro.second, Owner); - } } void ASTReader::makeModuleVisible(Module *Mod, Module::NameVisibilityKind NameVisibility, - SourceLocation ImportLoc, - bool Complain) { + SourceLocation ImportLoc) { llvm::SmallPtrSet<Module *, 4> Visited; SmallVector<Module *, 4> Stack; Stack.push_back(Mod); @@ -3519,9 +3269,6 @@ void ASTReader::makeModuleVisible(Module *Mod, } // Update the module's name visibility. - if (NameVisibility >= Module::MacrosVisible && - Mod->NameVisibility < Module::MacrosVisible) - Mod->MacroVisibilityLoc = ImportLoc; Mod->NameVisibility = NameVisibility; // If we've already deserialized any names from this module, @@ -3530,8 +3277,7 @@ void ASTReader::makeModuleVisible(Module *Mod, if (Hidden != HiddenNamesMap.end()) { auto HiddenNames = std::move(*Hidden); HiddenNamesMap.erase(Hidden); - makeNamesVisible(HiddenNames.second, HiddenNames.first, - /*FromFinalization*/false); + makeNamesVisible(HiddenNames.second, HiddenNames.first); assert(HiddenNamesMap.find(Mod) == HiddenNamesMap.end() && "making names visible added hidden names"); } @@ -3545,20 +3291,6 @@ void ASTReader::makeModuleVisible(Module *Mod, if (Visited.insert(Exported).second) Stack.push_back(Exported); } - - // Detect any conflicts. - if (Complain) { - assert(ImportLoc.isValid() && "Missing import location"); - for (unsigned I = 0, N = Mod->Conflicts.size(); I != N; ++I) { - if (Mod->Conflicts[I].Other->NameVisibility >= NameVisibility) { - Diag(ImportLoc, diag::warn_module_conflict) - << Mod->getFullModuleName() - << Mod->Conflicts[I].Other->getFullModuleName() - << Mod->Conflicts[I].Message; - // FIXME: Need note where the other module was imported. - } - } - } } } @@ -3717,7 +3449,7 @@ ASTReader::ASTReadResult ASTReader::ReadAST(const std::string &FileName, case UnresolvedModuleRef::Import: if (ResolvedMod) - Unresolved.Mod->Imports.push_back(ResolvedMod); + Unresolved.Mod->Imports.insert(ResolvedMod); continue; case UnresolvedModuleRef::Export: @@ -3785,6 +3517,14 @@ ASTReader::ASTReadResult ASTReader::ReadAST(const std::string &FileName, static ASTFileSignature readASTFileSignature(llvm::BitstreamReader &StreamFile); +/// \brief Whether \p Stream starts with the AST/PCH file magic number 'CPCH'. +static bool startsWithASTFileMagic(BitstreamCursor &Stream) { + return Stream.Read(8) == 'C' && + Stream.Read(8) == 'P' && + Stream.Read(8) == 'C' && + Stream.Read(8) == 'H'; +} + ASTReader::ASTReadResult ASTReader::ReadASTCore(StringRef FileName, ModuleKind Type, @@ -3854,10 +3594,7 @@ ASTReader::ReadASTCore(StringRef FileName, F.SizeInBits = F.Buffer->getBufferSize() * 8; // Sniff for the signature. 
- if (Stream.Read(8) != 'C' || - Stream.Read(8) != 'P' || - Stream.Read(8) != 'C' || - Stream.Read(8) != 'H') { + if (!startsWithASTFileMagic(Stream)) { Diag(diag::err_not_a_pch_file) << FileName; return Failure; } @@ -3924,7 +3661,7 @@ ASTReader::ReadASTCore(StringRef FileName, return Success; } -void ASTReader::InitializeContext() { +void ASTReader::InitializeContext() { // If there's a listener, notify them that we "read" the translation unit. if (DeserializationListener) DeserializationListener->DeclRead(PREDEF_DECL_TRANSLATION_UNIT_ID, @@ -4047,24 +3784,19 @@ void ASTReader::InitializeContext() { } // Re-export any modules that were imported by a non-module AST file. - // FIXME: This does not make macro-only imports visible again. It also doesn't - // make #includes mapped to module imports visible. + // FIXME: This does not make macro-only imports visible again. for (auto &Import : ImportedModules) { - if (Module *Imported = getSubmodule(Import.ID)) + if (Module *Imported = getSubmodule(Import.ID)) { makeModuleVisible(Imported, Module::AllVisible, - /*ImportLoc=*/Import.ImportLoc, - /*Complain=*/false); + /*ImportLoc=*/Import.ImportLoc); + PP.makeModuleVisible(Imported, Import.ImportLoc); + } } ImportedModules.clear(); } void ASTReader::finalizeForWriting() { - while (!HiddenNamesMap.empty()) { - auto HiddenNames = std::move(*HiddenNamesMap.begin()); - HiddenNamesMap.erase(HiddenNamesMap.begin()); - makeNamesVisible(HiddenNames.second, HiddenNames.first, - /*FromFinalization*/true); - } + // Nothing to do for now. } /// \brief Given a cursor at the start of an AST file, scan ahead and drop the @@ -4097,14 +3829,12 @@ static bool SkipCursorToBlock(BitstreamCursor &Cursor, unsigned BlockID) { } } +/// \brief Reads and returns the signature record from \p StreamFile's control +/// block, or else returns 0. static ASTFileSignature readASTFileSignature(llvm::BitstreamReader &StreamFile){ BitstreamCursor Stream(StreamFile); - if (Stream.Read(8) != 'C' || - Stream.Read(8) != 'P' || - Stream.Read(8) != 'C' || - Stream.Read(8) != 'H') { + if (!startsWithASTFileMagic(Stream)) return 0; - } // Scan for the CONTROL_BLOCK_ID block. if (SkipCursorToBlock(Stream, CONTROL_BLOCK_ID)) @@ -4146,10 +3876,7 @@ std::string ASTReader::getOriginalSourceFile(const std::string &ASTFileName, BitstreamCursor Stream(StreamFile); // Sniff for the signature. 
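
startsWithASTFileMagic, introduced above and applied at each former copy of the byte-by-byte check, sniffs the 'CPCH' magic number before any further parsing. A self-contained sketch of the same test, with a hypothetical ByteStream in place of the real bitstream cursor:

#include <cstdio>
#include <vector>

// Hypothetical stand-in for a bitstream cursor that yields one byte
// per call, like Stream.Read(8) in the real code.
struct ByteStream {
  explicit ByteStream(const std::vector<unsigned char> &Bytes)
      : Bytes(Bytes) {}
  int read() { return Pos < Bytes.size() ? Bytes[Pos++] : -1; }

private:
  const std::vector<unsigned char> &Bytes;
  size_t Pos = 0;
};

static bool startsWithASTFileMagic(ByteStream &S) {
  return S.read() == 'C' && S.read() == 'P' &&
         S.read() == 'C' && S.read() == 'H';
}

int main() {
  std::vector<unsigned char> PCH = {'C', 'P', 'C', 'H', 0x01};
  std::vector<unsigned char> Other = {'B', 'C', 0xC0, 0xDE};
  ByteStream A(PCH), B(Other);
  std::printf("PCH: %d, Other: %d\n", startsWithASTFileMagic(A),
              startsWithASTFileMagic(B));
}
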
- if (Stream.Read(8) != 'C' || - Stream.Read(8) != 'P' || - Stream.Read(8) != 'C' || - Stream.Read(8) != 'H') { + if (!startsWithASTFileMagic(Stream)) { Diags.Report(diag::err_fe_not_a_pch_file) << ASTFileName; return std::string(); } @@ -4184,16 +3911,19 @@ namespace { const LangOptions &ExistingLangOpts; const TargetOptions &ExistingTargetOpts; const PreprocessorOptions &ExistingPPOpts; + std::string ExistingModuleCachePath; FileManager &FileMgr; - + public: SimplePCHValidator(const LangOptions &ExistingLangOpts, const TargetOptions &ExistingTargetOpts, const PreprocessorOptions &ExistingPPOpts, + StringRef ExistingModuleCachePath, FileManager &FileMgr) : ExistingLangOpts(ExistingLangOpts), ExistingTargetOpts(ExistingTargetOpts), ExistingPPOpts(ExistingPPOpts), + ExistingModuleCachePath(ExistingModuleCachePath), FileMgr(FileMgr) { } @@ -4203,9 +3933,17 @@ namespace { return checkLanguageOptions(ExistingLangOpts, LangOpts, nullptr, AllowCompatibleDifferences); } - bool ReadTargetOptions(const TargetOptions &TargetOpts, - bool Complain) override { - return checkTargetOptions(ExistingTargetOpts, TargetOpts, nullptr); + bool ReadTargetOptions(const TargetOptions &TargetOpts, bool Complain, + bool AllowCompatibleDifferences) override { + return checkTargetOptions(ExistingTargetOpts, TargetOpts, nullptr, + AllowCompatibleDifferences); + } + bool ReadHeaderSearchOptions(const HeaderSearchOptions &HSOpts, + StringRef SpecificModuleCachePath, + bool Complain) override { + return checkHeaderSearchOptions(HSOpts, SpecificModuleCachePath, + ExistingModuleCachePath, + nullptr, ExistingLangOpts); } bool ReadPreprocessorOptions(const PreprocessorOptions &PPOpts, bool Complain, @@ -4220,6 +3958,8 @@ bool ASTReader::readASTFileControlBlock(StringRef Filename, FileManager &FileMgr, ASTReaderListener &Listener) { // Open the AST file. + // FIXME: This allows use of the VFS; we do not allow use of the + // VFS when actually loading a module. auto Buffer = FileMgr.getBufferForFile(Filename); if (!Buffer) { return true; @@ -4232,12 +3972,8 @@ bool ASTReader::readASTFileControlBlock(StringRef Filename, BitstreamCursor Stream(StreamFile); // Sniff for the signature. - if (Stream.Read(8) != 'C' || - Stream.Read(8) != 'P' || - Stream.Read(8) != 'C' || - Stream.Read(8) != 'H') { + if (!startsWithASTFileMagic(Stream)) return true; - } // Scan for the CONTROL_BLOCK_ID block. if (SkipCursorToBlock(Stream, CONTROL_BLOCK_ID)) @@ -4310,7 +4046,8 @@ bool ASTReader::readASTFileControlBlock(StringRef Filename, break; case TARGET_OPTIONS: - if (ParseTargetOptions(Record, false, Listener)) + if (ParseTargetOptions(Record, false, Listener, + /*AllowCompatibleConfigurationMismatch*/ false)) return true; break; @@ -4343,7 +4080,7 @@ bool ASTReader::readASTFileControlBlock(StringRef Filename, unsigned NumInputFiles = Record[0]; unsigned NumUserFiles = Record[1]; - const uint32_t *InputFileOffs = (const uint32_t *)Blob.data(); + const uint64_t *InputFileOffs = (const uint64_t *)Blob.data(); for (unsigned I = 0; I != NumInputFiles; ++I) { // Go find this input file. bool isSystemFile = I >= NumUserFiles; @@ -4389,6 +4126,20 @@ bool ASTReader::readASTFileControlBlock(StringRef Filename, break; } + case KNOWN_MODULE_FILES: { + // Known-but-not-technically-used module files are treated as imports. 
+ if (!NeedsImports) + break; + + unsigned Idx = 0, N = Record.size(); + while (Idx < N) { + std::string Filename = ReadString(Record, Idx); + ResolveImportedPath(Filename, ModuleDir); + Listener.visitImport(Filename); + } + break; + } + default: // No other validation to perform. break; @@ -4401,8 +4152,10 @@ bool ASTReader::isAcceptableASTFile(StringRef Filename, FileManager &FileMgr, const LangOptions &LangOpts, const TargetOptions &TargetOpts, - const PreprocessorOptions &PPOpts) { - SimplePCHValidator validator(LangOpts, TargetOpts, PPOpts, FileMgr); + const PreprocessorOptions &PPOpts, + std::string ExistingModuleCachePath) { + SimplePCHValidator validator(LangOpts, TargetOpts, PPOpts, + ExistingModuleCachePath, FileMgr); return !readASTFileControlBlock(Filename, FileMgr, validator); } @@ -4529,13 +4282,19 @@ ASTReader::ReadSubmoduleBlock(ModuleFile &F, unsigned ClientLoadCapabilities) { } case SUBMODULE_UMBRELLA_HEADER: { - if (const FileEntry *Umbrella = PP.getFileManager().getFile(Blob)) { + std::string Filename = Blob; + ResolveImportedPath(F, Filename); + if (auto *Umbrella = PP.getFileManager().getFile(Filename)) { if (!CurrentModule->getUmbrellaHeader()) - ModMap.setUmbrellaHeader(CurrentModule, Umbrella); - else if (CurrentModule->getUmbrellaHeader() != Umbrella) { - if ((ClientLoadCapabilities & ARR_OutOfDate) == 0) - Error("mismatched umbrella headers in submodule"); - return OutOfDate; + ModMap.setUmbrellaHeader(CurrentModule, Umbrella, Blob); + else if (CurrentModule->getUmbrellaHeader().Entry != Umbrella) { + // This can be a spurious difference caused by changing the VFS to + // point to a different copy of the file, and it is too late to + // rebuild safely. + // FIXME: If we wrote the virtual paths instead of the 'real' paths, + // after input file validation only real problems would remain and we + // could just error. For now, assume it's okay. 
+ break; } } break; @@ -4561,11 +4320,12 @@ ASTReader::ReadSubmoduleBlock(ModuleFile &F, unsigned ClientLoadCapabilities) { } case SUBMODULE_UMBRELLA_DIR: { - if (const DirectoryEntry *Umbrella - = PP.getFileManager().getDirectory(Blob)) { + std::string Dirname = Blob; + ResolveImportedPath(F, Dirname); + if (auto *Umbrella = PP.getFileManager().getDirectory(Dirname)) { if (!CurrentModule->getUmbrellaDir()) - ModMap.setUmbrellaDir(CurrentModule, Umbrella); - else if (CurrentModule->getUmbrellaDir() != Umbrella) { + ModMap.setUmbrellaDir(CurrentModule, Umbrella, Blob); + else if (CurrentModule->getUmbrellaDir().Entry != Umbrella) { if ((ClientLoadCapabilities & ARR_OutOfDate) == 0) Error("mismatched umbrella directories in submodule"); return OutOfDate; @@ -4696,9 +4456,9 @@ bool ASTReader::ParseLanguageOptions(const RecordData &Record, AllowCompatibleDifferences); } -bool ASTReader::ParseTargetOptions(const RecordData &Record, - bool Complain, - ASTReaderListener &Listener) { +bool ASTReader::ParseTargetOptions(const RecordData &Record, bool Complain, + ASTReaderListener &Listener, + bool AllowCompatibleDifferences) { unsigned Idx = 0; TargetOptions TargetOpts; TargetOpts.Triple = ReadString(Record, Idx); @@ -4711,7 +4471,8 @@ bool ASTReader::ParseTargetOptions(const RecordData &Record, TargetOpts.Features.push_back(ReadString(Record, Idx)); } - return Listener.ReadTargetOptions(TargetOpts, Complain); + return Listener.ReadTargetOptions(TargetOpts, Complain, + AllowCompatibleDifferences); } bool ASTReader::ParseDiagnosticOptions(const RecordData &Record, bool Complain, @@ -4773,8 +4534,10 @@ bool ASTReader::ParseHeaderSearchOptions(const RecordData &Record, HSOpts.UseStandardSystemIncludes = Record[Idx++]; HSOpts.UseStandardCXXIncludes = Record[Idx++]; HSOpts.UseLibcxx = Record[Idx++]; + std::string SpecificModuleCachePath = ReadString(Record, Idx); - return Listener.ReadHeaderSearchOptions(HSOpts, Complain); + return Listener.ReadHeaderSearchOptions(HSOpts, SpecificModuleCachePath, + Complain); } bool ASTReader::ParsePreprocessorOptions(const RecordData &Record, @@ -4823,21 +4586,22 @@ ASTReader::getModulePreprocessedEntity(unsigned GlobalIndex) { return std::make_pair(M, LocalIndex); } -std::pair<PreprocessingRecord::iterator, PreprocessingRecord::iterator> +llvm::iterator_range<PreprocessingRecord::iterator> ASTReader::getModulePreprocessedEntities(ModuleFile &Mod) const { if (PreprocessingRecord *PPRec = PP.getPreprocessingRecord()) return PPRec->getIteratorsForLoadedRange(Mod.BasePreprocessedEntityID, Mod.NumPreprocessedEntities); - return std::make_pair(PreprocessingRecord::iterator(), - PreprocessingRecord::iterator()); + return llvm::make_range(PreprocessingRecord::iterator(), + PreprocessingRecord::iterator()); } -std::pair<ASTReader::ModuleDeclIterator, ASTReader::ModuleDeclIterator> +llvm::iterator_range<ASTReader::ModuleDeclIterator> ASTReader::getModuleFileLevelDecls(ModuleFile &Mod) { - return std::make_pair(ModuleDeclIterator(this, &Mod, Mod.FileSortedDecls), - ModuleDeclIterator(this, &Mod, - Mod.FileSortedDecls + Mod.NumFileSortedDecls)); + return llvm::make_range( + ModuleDeclIterator(this, &Mod, Mod.FileSortedDecls), + ModuleDeclIterator(this, &Mod, + Mod.FileSortedDecls + Mod.NumFileSortedDecls)); } PreprocessedEntity *ASTReader::ReadPreprocessedEntity(unsigned Index) { @@ -4873,13 +4637,14 @@ PreprocessedEntity *ASTReader::ReadPreprocessedEntity(unsigned Index) { case PPD_MACRO_EXPANSION: { bool isBuiltin = Record[0]; IdentifierInfo *Name = nullptr; - MacroDefinition 
*Def = nullptr; + MacroDefinitionRecord *Def = nullptr; if (isBuiltin) Name = getLocalIdentifier(M, Record[1]); else { - PreprocessedEntityID - GlobalID = getGlobalPreprocessedEntityID(M, Record[1]); - Def =cast<MacroDefinition>(PPRec.getLoadedPreprocessedEntity(GlobalID-1)); + PreprocessedEntityID GlobalID = + getGlobalPreprocessedEntityID(M, Record[1]); + Def = cast<MacroDefinitionRecord>( + PPRec.getLoadedPreprocessedEntity(GlobalID - 1)); } MacroExpansion *ME; @@ -4895,8 +4660,7 @@ PreprocessedEntity *ASTReader::ReadPreprocessedEntity(unsigned Index) { // Decode the identifier info and then check again; if the macro is // still defined and associated with the identifier, IdentifierInfo *II = getLocalIdentifier(M, Record[0]); - MacroDefinition *MD - = new (PPRec) MacroDefinition(II, Range); + MacroDefinitionRecord *MD = new (PPRec) MacroDefinitionRecord(II, Range); if (DeserializationListener) DeserializationListener->MacroDefinitionRead(PPID, MD); @@ -6081,6 +5845,12 @@ Decl *ASTReader::GetExternalDecl(uint32_t ID) { return GetDecl(ID); } +template<typename TemplateSpecializationDecl> +static void completeRedeclChainForTemplateSpecialization(Decl *D) { + if (auto *TSD = dyn_cast<TemplateSpecializationDecl>(D)) + TSD->getSpecializedTemplate()->LoadLazySpecializations(); +} + void ASTReader::CompleteRedeclChain(const Decl *D) { if (NumCurrentElementsDeserializing) { // We arrange to not care about the complete redeclaration chain while we're @@ -6114,6 +5884,47 @@ void ASTReader::CompleteRedeclChain(const Decl *D) { D->getDeclContext()->decls_begin(); } } + + if (auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(D)) + CTSD->getSpecializedTemplate()->LoadLazySpecializations(); + if (auto *VTSD = dyn_cast<VarTemplateSpecializationDecl>(D)) + VTSD->getSpecializedTemplate()->LoadLazySpecializations(); + if (auto *FD = dyn_cast<FunctionDecl>(D)) { + if (auto *Template = FD->getPrimaryTemplate()) + Template->LoadLazySpecializations(); + } +} + +uint64_t ASTReader::ReadCXXCtorInitializersRef(ModuleFile &M, + const RecordData &Record, + unsigned &Idx) { + if (Idx >= Record.size() || Record[Idx] > M.LocalNumCXXCtorInitializers) { + Error("malformed AST file: missing C++ ctor initializers"); + return 0; + } + + unsigned LocalID = Record[Idx++]; + return getGlobalBitOffset(M, M.CXXCtorInitializersOffsets[LocalID - 1]); +} + +CXXCtorInitializer ** +ASTReader::GetExternalCXXCtorInitializers(uint64_t Offset) { + RecordLocation Loc = getLocalBitOffset(Offset); + BitstreamCursor &Cursor = Loc.F->DeclsCursor; + SavedStreamPosition SavedPosition(Cursor); + Cursor.JumpToBit(Loc.Offset); + ReadingKindTracker ReadingKind(Read_Decl, *this); + + RecordData Record; + unsigned Code = Cursor.ReadCode(); + unsigned RecCode = Cursor.readRecord(Code, Record); + if (RecCode != DECL_CXX_CTOR_INITIALIZERS) { + Error("malformed AST file: missing C++ ctor initializers"); + return nullptr; + } + + unsigned Idx = 0; + return ReadCXXCtorInitializers(*Loc.F, Record, Idx); } uint64_t ASTReader::readCXXBaseSpecifiers(ModuleFile &M, @@ -6165,6 +5976,10 @@ ASTReader::getGlobalDeclID(ModuleFile &F, LocalDeclID LocalID) const { bool ASTReader::isDeclIDFromModule(serialization::GlobalDeclID ID, ModuleFile &M) const { + // Predefined decls aren't from any module. 
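
The check that follows treats IDs below NUM_PREDEF_DECL_IDS as predefined declarations owned by no module file; every ID actually stored on disk is offset past that reserved range. A toy sketch of such an ID scheme (the constant's value here is made up):

#include <cassert>
#include <cstdio>

enum : unsigned { NUM_PREDEF_DECL_IDS = 16 }; // hypothetical reserved range

static bool isPredefined(unsigned ID) { return ID < NUM_PREDEF_DECL_IDS; }

// Map a loaded declaration's index to its global ID and back.
static unsigned toGlobalID(unsigned Index) {
  return Index + NUM_PREDEF_DECL_IDS;
}
static unsigned toIndex(unsigned ID) {
  assert(!isPredefined(ID) && "predefined decls have no loaded index");
  return ID - NUM_PREDEF_DECL_IDS;
}

int main() {
  std::printf("ID 3 predefined: %d\n", isPredefined(3));
  unsigned ID = toGlobalID(0); // first decl actually stored in a module file
  std::printf("first loaded decl: ID %u, index %u\n", ID, toIndex(ID));
}
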
+ if (ID < NUM_PREDEF_DECL_IDS) + return false; + GlobalDeclMapType::const_iterator I = GlobalDeclMap.find(ID); assert(I != GlobalDeclMap.end() && "Corrupted global declaration map"); return &M == I->second; @@ -6197,39 +6012,55 @@ SourceLocation ASTReader::getSourceLocationForDeclID(GlobalDeclID ID) { return ReadSourceLocation(*Rec.F, RawLocation); } -Decl *ASTReader::GetExistingDecl(DeclID ID) { - if (ID < NUM_PREDEF_DECL_IDS) { - switch ((PredefinedDeclIDs)ID) { - case PREDEF_DECL_NULL_ID: - return nullptr; +static Decl *getPredefinedDecl(ASTContext &Context, PredefinedDeclIDs ID) { + switch (ID) { + case PREDEF_DECL_NULL_ID: + return nullptr; + + case PREDEF_DECL_TRANSLATION_UNIT_ID: + return Context.getTranslationUnitDecl(); - case PREDEF_DECL_TRANSLATION_UNIT_ID: - return Context.getTranslationUnitDecl(); + case PREDEF_DECL_OBJC_ID_ID: + return Context.getObjCIdDecl(); - case PREDEF_DECL_OBJC_ID_ID: - return Context.getObjCIdDecl(); + case PREDEF_DECL_OBJC_SEL_ID: + return Context.getObjCSelDecl(); - case PREDEF_DECL_OBJC_SEL_ID: - return Context.getObjCSelDecl(); + case PREDEF_DECL_OBJC_CLASS_ID: + return Context.getObjCClassDecl(); - case PREDEF_DECL_OBJC_CLASS_ID: - return Context.getObjCClassDecl(); + case PREDEF_DECL_OBJC_PROTOCOL_ID: + return Context.getObjCProtocolDecl(); - case PREDEF_DECL_OBJC_PROTOCOL_ID: - return Context.getObjCProtocolDecl(); + case PREDEF_DECL_INT_128_ID: + return Context.getInt128Decl(); - case PREDEF_DECL_INT_128_ID: - return Context.getInt128Decl(); + case PREDEF_DECL_UNSIGNED_INT_128_ID: + return Context.getUInt128Decl(); - case PREDEF_DECL_UNSIGNED_INT_128_ID: - return Context.getUInt128Decl(); + case PREDEF_DECL_OBJC_INSTANCETYPE_ID: + return Context.getObjCInstanceTypeDecl(); - case PREDEF_DECL_OBJC_INSTANCETYPE_ID: - return Context.getObjCInstanceTypeDecl(); + case PREDEF_DECL_BUILTIN_VA_LIST_ID: + return Context.getBuiltinVaListDecl(); - case PREDEF_DECL_BUILTIN_VA_LIST_ID: - return Context.getBuiltinVaListDecl(); + case PREDEF_DECL_EXTERN_C_CONTEXT_ID: + return Context.getExternCContextDecl(); + } + llvm_unreachable("PredefinedDeclIDs unknown enum value"); +} + +Decl *ASTReader::GetExistingDecl(DeclID ID) { + if (ID < NUM_PREDEF_DECL_IDS) { + Decl *D = getPredefinedDecl(Context, (PredefinedDeclIDs)ID); + if (D) { + // Track that we have merged the declaration with ID \p ID into the + // pre-existing predefined declaration \p D. + auto &Merged = MergedDecls[D->getCanonicalDecl()]; + if (Merged.empty()) + Merged.push_back(ID); } + return D; } unsigned Index = ID - NUM_PREDEF_DECL_IDS; @@ -6326,10 +6157,7 @@ namespace { PredefsVisited[I] = false; } - static bool visit(ModuleFile &M, bool Preorder, void *UserData) { - if (Preorder) - return false; - + static bool visitPostorder(ModuleFile &M, void *UserData) { FindExternalLexicalDeclsVisitor *This = static_cast<FindExternalLexicalDeclsVisitor *>(UserData); @@ -6371,7 +6199,8 @@ ExternalLoadResult ASTReader::FindExternalLexicalDecls(const DeclContext *DC, // There might be lexical decls in multiple modules, for the TU at // least. Walk all of the modules in the order they were loaded. 
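
The call just below switches to a visitDepthFirst overload taking separate preorder and postorder callbacks, passing nullptr for the preorder one. A minimal sketch of that callback split over a toy module graph:

#include <cstdio>
#include <functional>
#include <vector>

// Toy module graph node.
struct ModuleFile {
  const char *Name;
  std::vector<ModuleFile *> Imports;
};

// Depth-first visitation with separate preorder and postorder callbacks,
// in the spirit of the visitDepthFirst(pre, post, ...) split below.
static void visitDepthFirst(ModuleFile &M,
                            const std::function<void(ModuleFile &)> &Pre,
                            const std::function<void(ModuleFile &)> &Post) {
  if (Pre)
    Pre(M);
  for (ModuleFile *Import : M.Imports)
    visitDepthFirst(*Import, Pre, Post);
  if (Post)
    Post(M);
}

int main() {
  ModuleFile A{"A", {}}, B{"B", {}}, TU{"TU", {&A, &B}};
  visitDepthFirst(TU, nullptr, // lexical decls only need the postorder pass
                  [](ModuleFile &M) { std::printf("post %s\n", M.Name); });
}
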
FindExternalLexicalDeclsVisitor Visitor(*this, DC, isKindWeWant, Decls); - ModuleMgr.visitDepthFirst(&FindExternalLexicalDeclsVisitor::visit, &Visitor); + ModuleMgr.visitDepthFirst( + nullptr, &FindExternalLexicalDeclsVisitor::visitPostorder, &Visitor); ++NumLexicalDeclContextsRead; return ELR_Success; } @@ -6460,13 +6289,16 @@ namespace { ArrayRef<const DeclContext *> Contexts; DeclarationName Name; SmallVectorImpl<NamedDecl *> &Decls; + llvm::SmallPtrSetImpl<NamedDecl *> &DeclSet; public: DeclContextNameLookupVisitor(ASTReader &Reader, ArrayRef<const DeclContext *> Contexts, DeclarationName Name, - SmallVectorImpl<NamedDecl *> &Decls) - : Reader(Reader), Contexts(Contexts), Name(Name), Decls(Decls) { } + SmallVectorImpl<NamedDecl *> &Decls, + llvm::SmallPtrSetImpl<NamedDecl *> &DeclSet) + : Reader(Reader), Contexts(Contexts), Name(Name), Decls(Decls), + DeclSet(DeclSet) { } static bool visit(ModuleFile &M, void *UserData) { DeclContextNameLookupVisitor *This @@ -6515,7 +6347,8 @@ namespace { // Record this declaration. FoundAnything = true; - This->Decls.push_back(ND); + if (This->DeclSet.insert(ND).second) + This->Decls.push_back(ND); } return FoundAnything; @@ -6555,6 +6388,7 @@ ASTReader::FindExternalVisibleDeclsByName(const DeclContext *DC, Deserializing LookupResults(this); SmallVector<NamedDecl *, 64> Decls; + llvm::SmallPtrSet<NamedDecl*, 64> DeclSet; // Compute the declaration contexts we need to look into. Multiple such // declaration contexts occur when two declaration contexts from disjoint @@ -6572,7 +6406,7 @@ ASTReader::FindExternalVisibleDeclsByName(const DeclContext *DC, } auto LookUpInContexts = [&](ArrayRef<const DeclContext*> Contexts) { - DeclContextNameLookupVisitor Visitor(*this, Contexts, Name, Decls); + DeclContextNameLookupVisitor Visitor(*this, Contexts, Name, Decls, DeclSet); // If we can definitively determine which module file to look into, // only look there. Otherwise, look in all module files. @@ -6592,19 +6426,14 @@ ASTReader::FindExternalVisibleDeclsByName(const DeclContext *DC, // individually, because finding an entity in one of them doesn't imply that // we can't find a different entity in another one. if (isa<CXXRecordDecl>(DC)) { - auto Kind = Name.getNameKind(); - if (Kind == DeclarationName::CXXConstructorName || - Kind == DeclarationName::CXXDestructorName || - (Kind == DeclarationName::CXXOperatorName && - Name.getCXXOverloadedOperator() == OO_Equal)) { - auto Merged = MergedLookups.find(DC); - if (Merged != MergedLookups.end()) { - for (unsigned I = 0; I != Merged->second.size(); ++I) { - LookUpInContexts(Merged->second[I]); - // We might have just added some more merged lookups. If so, our - // iterator is now invalid, so grab a fresh one before continuing. - Merged = MergedLookups.find(DC); - } + auto Merged = MergedLookups.find(DC); + if (Merged != MergedLookups.end()) { + for (unsigned I = 0; I != Merged->second.size(); ++I) { + const DeclContext *Context = Merged->second[I]; + LookUpInContexts(Context); + // We might have just added some more merged lookups. If so, our + // iterator is now invalid, so grab a fresh one before continuing. + Merged = MergedLookups.find(DC); } } } @@ -6621,6 +6450,7 @@ namespace { ASTReader &Reader; SmallVectorImpl<const DeclContext *> &Contexts; DeclsMap &Decls; + llvm::SmallPtrSet<NamedDecl *, 256> DeclSet; bool VisitAll; public: @@ -6665,7 +6495,8 @@ namespace { // Record this declaration. 
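
This hunk and the next give both lookup visitors a set alongside their result vector, so a declaration surfaced by several module files is recorded only once; the idiom is gating push_back on insert(...).second. The same pattern with std::unordered_set standing in for llvm::SmallPtrSet:

#include <cstdio>
#include <unordered_set>
#include <vector>

struct NamedDecl { const char *Name; };

int main() {
  NamedDecl Foo{"foo"}, Bar{"bar"};
  // Lookup results may surface the same decl from multiple module files.
  NamedDecl *Found[] = {&Foo, &Bar, &Foo};

  std::vector<NamedDecl *> Decls;       // ordered results
  std::unordered_set<NamedDecl *> Seen; // dedup set (SmallPtrSet in clang)
  for (NamedDecl *ND : Found)
    if (Seen.insert(ND).second) // true only for the first occurrence
      Decls.push_back(ND);

  for (NamedDecl *ND : Decls)
    std::printf("%s\n", ND->Name);
}
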
FoundAnything = true; - This->Decls[ND->getDeclName()].push_back(ND); + if (This->DeclSet.insert(ND).second) + This->Decls[ND->getDeclName()].push_back(ND); } } @@ -6731,6 +6562,12 @@ void ASTReader::PassInterestingDeclsToConsumer() { SaveAndRestore<bool> GuardPassingDeclsToConsumer(PassingDeclsToConsumer, true); + // Ensure that we've loaded all potentially-interesting declarations + // that need to be eagerly loaded. + for (auto ID : EagerlyDeserializedDecls) + GetDecl(ID); + EagerlyDeserializedDecls.clear(); + while (!InterestingDecls.empty()) { Decl *D = InterestingDecls.front(); InterestingDecls.pop_front(); @@ -6749,17 +6586,11 @@ void ASTReader::PassInterestingDeclToConsumer(Decl *D) { void ASTReader::StartTranslationUnit(ASTConsumer *Consumer) { this->Consumer = Consumer; - if (!Consumer) - return; + if (Consumer) + PassInterestingDeclsToConsumer(); - for (unsigned I = 0, N = EagerlyDeserializedDecls.size(); I != N; ++I) { - // Force deserialization of this decl, which will cause it to be queued for - // passing to the consumer. - GetDecl(EagerlyDeserializedDecls[I]); - } - EagerlyDeserializedDecls.clear(); - - PassInterestingDeclsToConsumer(); + if (DeserializationListener) + DeserializationListener->ReaderInitialized(this); } void ASTReader::PrintStats() { @@ -7195,6 +7026,21 @@ void ASTReader::ReadUndefinedButUsed( } } +void ASTReader::ReadMismatchingDeleteExpressions(llvm::MapVector< + FieldDecl *, llvm::SmallVector<std::pair<SourceLocation, bool>, 4>> & + Exprs) { + for (unsigned Idx = 0, N = DelayedDeleteExprs.size(); Idx != N;) { + FieldDecl *FD = cast<FieldDecl>(GetDecl(DelayedDeleteExprs[Idx++])); + uint64_t Count = DelayedDeleteExprs[Idx++]; + for (uint64_t C = 0; C < Count; ++C) { + SourceLocation DeleteLoc = + SourceLocation::getFromRawEncoding(DelayedDeleteExprs[Idx++]); + const bool IsArrayForm = DelayedDeleteExprs[Idx++]; + Exprs[FD].push_back(std::make_pair(DeleteLoc, IsArrayForm)); + } + } +} + void ASTReader::ReadTentativeDefinitions( SmallVectorImpl<VarDecl *> &TentativeDefs) { for (unsigned I = 0, N = TentativeDefinitions.size(); I != N; ++I) { @@ -7237,16 +7083,6 @@ void ASTReader::ReadExtVectorDecls(SmallVectorImpl<TypedefNameDecl *> &Decls) { ExtVectorDecls.clear(); } -void ASTReader::ReadDynamicClasses(SmallVectorImpl<CXXRecordDecl *> &Decls) { - for (unsigned I = 0, N = DynamicClasses.size(); I != N; ++I) { - CXXRecordDecl *D - = dyn_cast_or_null<CXXRecordDecl>(GetDecl(DynamicClasses[I])); - if (D) - Decls.push_back(D); - } - DynamicClasses.clear(); -} - void ASTReader::ReadUnusedLocalTypedefNameCandidates( llvm::SmallSetVector<const TypedefNameDecl *, 4> &Decls) { for (unsigned I = 0, N = UnusedLocalTypedefNameCandidates.size(); I != N; @@ -7259,17 +7095,6 @@ void ASTReader::ReadUnusedLocalTypedefNameCandidates( UnusedLocalTypedefNameCandidates.clear(); } -void -ASTReader::ReadLocallyScopedExternCDecls(SmallVectorImpl<NamedDecl *> &Decls) { - for (unsigned I = 0, N = LocallyScopedExternCDecls.size(); I != N; ++I) { - NamedDecl *D - = dyn_cast_or_null<NamedDecl>(GetDecl(LocallyScopedExternCDecls[I])); - if (D) - Decls.push_back(D); - } - LocallyScopedExternCDecls.clear(); -} - void ASTReader::ReadReferencedSelectors( SmallVectorImpl<std::pair<Selector, SourceLocation> > &Sels) { if (ReferencedSelectorsData.empty()) @@ -7333,7 +7158,7 @@ void ASTReader::ReadPendingInstantiations( } void ASTReader::ReadLateParsedTemplates( - llvm::DenseMap<const FunctionDecl *, LateParsedTemplate *> &LPTMap) { + llvm::MapVector<const FunctionDecl *, LateParsedTemplate *> 
&LPTMap) { for (unsigned Idx = 0, N = LateParsedTemplates.size(); Idx < N; /* In loop */) { FunctionDecl *FD = cast<FunctionDecl>(GetDecl(LateParsedTemplates[Idx++])); @@ -7349,7 +7174,7 @@ void ASTReader::ReadLateParsedTemplates( for (unsigned T = 0; T < TokN; ++T) LT->Toks.push_back(ReadToken(*F, LateParsedTemplates, Idx)); - LPTMap[FD] = LT; + LPTMap.insert(std::make_pair(FD, LT)); } LateParsedTemplates.clear(); @@ -7840,92 +7665,89 @@ ASTReader::ReadCXXBaseSpecifier(ModuleFile &F, return Result; } -std::pair<CXXCtorInitializer **, unsigned> +CXXCtorInitializer ** ASTReader::ReadCXXCtorInitializers(ModuleFile &F, const RecordData &Record, unsigned &Idx) { - CXXCtorInitializer **CtorInitializers = nullptr; unsigned NumInitializers = Record[Idx++]; - if (NumInitializers) { - CtorInitializers - = new (Context) CXXCtorInitializer*[NumInitializers]; - for (unsigned i=0; i != NumInitializers; ++i) { - TypeSourceInfo *TInfo = nullptr; - bool IsBaseVirtual = false; - FieldDecl *Member = nullptr; - IndirectFieldDecl *IndirectMember = nullptr; - - CtorInitializerType Type = (CtorInitializerType)Record[Idx++]; - switch (Type) { - case CTOR_INITIALIZER_BASE: - TInfo = GetTypeSourceInfo(F, Record, Idx); - IsBaseVirtual = Record[Idx++]; - break; - - case CTOR_INITIALIZER_DELEGATING: - TInfo = GetTypeSourceInfo(F, Record, Idx); - break; + assert(NumInitializers && "wrote ctor initializers but have no inits"); + auto **CtorInitializers = new (Context) CXXCtorInitializer*[NumInitializers]; + for (unsigned i = 0; i != NumInitializers; ++i) { + TypeSourceInfo *TInfo = nullptr; + bool IsBaseVirtual = false; + FieldDecl *Member = nullptr; + IndirectFieldDecl *IndirectMember = nullptr; - case CTOR_INITIALIZER_MEMBER: - Member = ReadDeclAs<FieldDecl>(F, Record, Idx); - break; + CtorInitializerType Type = (CtorInitializerType)Record[Idx++]; + switch (Type) { + case CTOR_INITIALIZER_BASE: + TInfo = GetTypeSourceInfo(F, Record, Idx); + IsBaseVirtual = Record[Idx++]; + break; - case CTOR_INITIALIZER_INDIRECT_MEMBER: - IndirectMember = ReadDeclAs<IndirectFieldDecl>(F, Record, Idx); - break; - } + case CTOR_INITIALIZER_DELEGATING: + TInfo = GetTypeSourceInfo(F, Record, Idx); + break; - SourceLocation MemberOrEllipsisLoc = ReadSourceLocation(F, Record, Idx); - Expr *Init = ReadExpr(F); - SourceLocation LParenLoc = ReadSourceLocation(F, Record, Idx); - SourceLocation RParenLoc = ReadSourceLocation(F, Record, Idx); - bool IsWritten = Record[Idx++]; - unsigned SourceOrderOrNumArrayIndices; - SmallVector<VarDecl *, 8> Indices; - if (IsWritten) { - SourceOrderOrNumArrayIndices = Record[Idx++]; - } else { - SourceOrderOrNumArrayIndices = Record[Idx++]; - Indices.reserve(SourceOrderOrNumArrayIndices); - for (unsigned i=0; i != SourceOrderOrNumArrayIndices; ++i) - Indices.push_back(ReadDeclAs<VarDecl>(F, Record, Idx)); - } + case CTOR_INITIALIZER_MEMBER: + Member = ReadDeclAs<FieldDecl>(F, Record, Idx); + break; - CXXCtorInitializer *BOMInit; - if (Type == CTOR_INITIALIZER_BASE) { - BOMInit = new (Context) CXXCtorInitializer(Context, TInfo, IsBaseVirtual, - LParenLoc, Init, RParenLoc, - MemberOrEllipsisLoc); - } else if (Type == CTOR_INITIALIZER_DELEGATING) { - BOMInit = new (Context) CXXCtorInitializer(Context, TInfo, LParenLoc, - Init, RParenLoc); - } else if (IsWritten) { - if (Member) - BOMInit = new (Context) CXXCtorInitializer(Context, Member, MemberOrEllipsisLoc, - LParenLoc, Init, RParenLoc); - else - BOMInit = new (Context) CXXCtorInitializer(Context, IndirectMember, - MemberOrEllipsisLoc, LParenLoc, - Init, 
RParenLoc); + case CTOR_INITIALIZER_INDIRECT_MEMBER: + IndirectMember = ReadDeclAs<IndirectFieldDecl>(F, Record, Idx); + break; + } + + SourceLocation MemberOrEllipsisLoc = ReadSourceLocation(F, Record, Idx); + Expr *Init = ReadExpr(F); + SourceLocation LParenLoc = ReadSourceLocation(F, Record, Idx); + SourceLocation RParenLoc = ReadSourceLocation(F, Record, Idx); + bool IsWritten = Record[Idx++]; + unsigned SourceOrderOrNumArrayIndices; + SmallVector<VarDecl *, 8> Indices; + if (IsWritten) { + SourceOrderOrNumArrayIndices = Record[Idx++]; + } else { + SourceOrderOrNumArrayIndices = Record[Idx++]; + Indices.reserve(SourceOrderOrNumArrayIndices); + for (unsigned i=0; i != SourceOrderOrNumArrayIndices; ++i) + Indices.push_back(ReadDeclAs<VarDecl>(F, Record, Idx)); + } + + CXXCtorInitializer *BOMInit; + if (Type == CTOR_INITIALIZER_BASE) { + BOMInit = new (Context) + CXXCtorInitializer(Context, TInfo, IsBaseVirtual, LParenLoc, Init, + RParenLoc, MemberOrEllipsisLoc); + } else if (Type == CTOR_INITIALIZER_DELEGATING) { + BOMInit = new (Context) + CXXCtorInitializer(Context, TInfo, LParenLoc, Init, RParenLoc); + } else if (IsWritten) { + if (Member) + BOMInit = new (Context) CXXCtorInitializer( + Context, Member, MemberOrEllipsisLoc, LParenLoc, Init, RParenLoc); + else + BOMInit = new (Context) + CXXCtorInitializer(Context, IndirectMember, MemberOrEllipsisLoc, + LParenLoc, Init, RParenLoc); + } else { + if (IndirectMember) { + assert(Indices.empty() && "Indirect field improperly initialized"); + BOMInit = new (Context) + CXXCtorInitializer(Context, IndirectMember, MemberOrEllipsisLoc, + LParenLoc, Init, RParenLoc); } else { - if (IndirectMember) { - assert(Indices.empty() && "Indirect field improperly initialized"); - BOMInit = new (Context) CXXCtorInitializer(Context, IndirectMember, - MemberOrEllipsisLoc, LParenLoc, - Init, RParenLoc); - } else { - BOMInit = CXXCtorInitializer::Create(Context, Member, MemberOrEllipsisLoc, - LParenLoc, Init, RParenLoc, - Indices.data(), Indices.size()); - } + BOMInit = CXXCtorInitializer::Create( + Context, Member, MemberOrEllipsisLoc, LParenLoc, Init, RParenLoc, + Indices.data(), Indices.size()); } - - if (IsWritten) - BOMInit->setSourceOrder(SourceOrderOrNumArrayIndices); - CtorInitializers[i] = BOMInit; } + + if (IsWritten) + BOMInit->setSourceOrder(SourceOrderOrNumArrayIndices); + CtorInitializers[i] = BOMInit; } - return std::make_pair(CtorInitializers, NumInitializers); + return CtorInitializers; } NestedNameSpecifier * @@ -8203,7 +8025,7 @@ void ASTReader::getInputFiles(ModuleFile &F, std::string ASTReader::getOwningModuleNameForDiagnostic(const Decl *D) { // If we know the owning module, use it. - if (Module *M = D->getOwningModule()) + if (Module *M = D->getImportedOwningModule()) return M->getFullModuleName(); // Otherwise, use the name of the top-level module the decl is within. @@ -8243,9 +8065,10 @@ void ASTReader::finishPendingActions() { // Load pending declaration chains. for (unsigned I = 0; I != PendingDeclChains.size(); ++I) { - loadPendingDeclChain(PendingDeclChains[I]); PendingDeclChainsKnown.erase(PendingDeclChains[I]); + loadPendingDeclChain(PendingDeclChains[I]); } + assert(PendingDeclChainsKnown.empty()); PendingDeclChains.clear(); // Make the most recent of the top-level declarations visible. 
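
The ReadLateParsedTemplates hunk above swaps llvm::DenseMap for llvm::MapVector, trading a little memory for iteration in insertion order, which keeps downstream processing deterministic across runs. A reduced sketch of what a MapVector-style container provides:

#include <cstdio>
#include <unordered_map>
#include <utility>
#include <vector>

// Minimal MapVector-like container: near-O(1) lookup plus iteration in
// insertion order, which keeps downstream behavior deterministic.
struct SimpleMapVector {
  std::vector<std::pair<int, const char *>> Vec;
  std::unordered_map<int, size_t> Map;

  void insert(int Key, const char *Value) {
    if (Map.count(Key))
      return; // first insertion wins, like MapVector::insert
    Map[Key] = Vec.size();
    Vec.emplace_back(Key, Value);
  }
};

int main() {
  SimpleMapVector LPTMap;
  LPTMap.insert(42, "template A");
  LPTMap.insert(7, "template B");
  LPTMap.insert(42, "duplicate, ignored");
  // Iteration order is insertion order, not hash order.
  for (const auto &KV : LPTMap.Vec)
    std::printf("%d -> %s\n", KV.first, KV.second);
}
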
@@ -8298,7 +8121,12 @@ void ASTReader::finishPendingActions() { loadDeclUpdateRecords(Update.first, Update.second); } } - + + // At this point, all update records for loaded decls are in place, so any + // fake class definitions should have become real. + assert(PendingFakeDefinitionData.empty() && + "faked up a class definition but never saw the real one"); + // If we deserialized any C++ or Objective-C class definitions, any // Objective-C protocol definitions, or any redeclarable templates, make sure // that all redeclarations point to the definitions. Note that this can only @@ -8309,10 +8137,12 @@ void ASTReader::finishPendingActions() { // Make sure that the TagType points at the definition. const_cast<TagType*>(TagT)->decl = TD; } - + if (auto RD = dyn_cast<CXXRecordDecl>(D)) { - for (auto R : RD->redecls()) { - assert((R == D) == R->isThisDeclarationADefinition() && + for (auto *R = getMostRecentExistingDecl(RD); R; + R = R->getPreviousDecl()) { + assert((R == D) == + cast<CXXRecordDecl>(R)->isThisDeclarationADefinition() && "declaration thinks it's the definition but it isn't"); cast<CXXRecordDecl>(R)->DefinitionData = RD->DefinitionData; } @@ -8320,34 +8150,36 @@ void ASTReader::finishPendingActions() { continue; } - + if (auto ID = dyn_cast<ObjCInterfaceDecl>(D)) { // Make sure that the ObjCInterfaceType points at the definition. const_cast<ObjCInterfaceType *>(cast<ObjCInterfaceType>(ID->TypeForDecl)) ->Decl = ID; - - for (auto R : ID->redecls()) - R->Data = ID->Data; - + + for (auto *R = getMostRecentExistingDecl(ID); R; R = R->getPreviousDecl()) + cast<ObjCInterfaceDecl>(R)->Data = ID->Data; + continue; } - + if (auto PD = dyn_cast<ObjCProtocolDecl>(D)) { - for (auto R : PD->redecls()) - R->Data = PD->Data; - + for (auto *R = getMostRecentExistingDecl(PD); R; R = R->getPreviousDecl()) + cast<ObjCProtocolDecl>(R)->Data = PD->Data; + continue; } - + auto RTD = cast<RedeclarableTemplateDecl>(D)->getCanonicalDecl(); - for (auto R : RTD->redecls()) - R->Common = RTD->Common; + for (auto *R = getMostRecentExistingDecl(RTD); R; R = R->getPreviousDecl()) + cast<RedeclarableTemplateDecl>(R)->Common = RTD->Common; } PendingDefinitions.clear(); // Load the bodies of any functions or methods we've encountered. We do // this now (delayed) so that we can be sure that the declaration chains // have been fully wired up. + // FIXME: There seems to be no point in delaying this, it does not depend + // on the redecl chains having been wired up. for (PendingBodiesMap::iterator PB = PendingBodies.begin(), PBEnd = PendingBodies.end(); PB != PBEnd; ++PB) { @@ -8364,6 +8196,11 @@ void ASTReader::finishPendingActions() { MD->setLazyBody(PB->second); } PendingBodies.clear(); + + // Do some cleanup. + for (auto *ND : PendingMergedDefinitionsToDeduplicate) + getContext().deduplicateMergedDefinitonsFor(ND); + PendingMergedDefinitionsToDeduplicate.clear(); } void ASTReader::diagnoseOdrViolations() { @@ -8440,6 +8277,10 @@ void ASTReader::diagnoseOdrViolations() { // completed. We only really need to mark FieldDecls as invalid here. if (!isa<TagDecl>(D)) D->setInvalidDecl(); + + // Ensure we don't accidentally recursively enter deserialization while + // we're producing our diagnostic. 
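
The RecursionGuard lines added in this hunk use clang's Deserializing RAII type so that emitting a diagnostic cannot re-enter deserialization. A toy version of that counter-based guard:

#include <cassert>
#include <cstdio>

struct Reader; // toy stand-in for ASTReader

// RAII guard bumping a nesting counter for the current scope, in the
// spirit of ASTReader::Deserializing.
struct Deserializing {
  Reader &R;
  explicit Deserializing(Reader &R);
  ~Deserializing();
};

struct Reader {
  unsigned NumCurrentElementsDeserializing = 0;
  void emitDiagnostic() {
    // While the guard is live, re-entrant work is deferred, not started.
    assert(NumCurrentElementsDeserializing > 0);
    std::printf("diagnosing (nesting %u)\n", NumCurrentElementsDeserializing);
  }
};

Deserializing::Deserializing(Reader &R) : R(R) {
  ++R.NumCurrentElementsDeserializing;
}
Deserializing::~Deserializing() { --R.NumCurrentElementsDeserializing; }

int main() {
  Reader R;
  {
    Deserializing Guard(R);
    R.emitDiagnostic();
  }
  std::printf("nesting back to %u\n", R.NumCurrentElementsDeserializing);
}
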
+ Deserializing RecursionGuard(this); std::string CanonDefModule = getOwningModuleNameForDiagnostic(cast<Decl>(CanonDef)); @@ -8461,6 +8302,13 @@ void ASTReader::diagnoseOdrViolations() { } } + if (OdrMergeFailures.empty()) + return; + + // Ensure we don't accidentally recursively enter deserialization while + // we're producing our diagnostics. + Deserializing RecursionGuard(this); + // Issue any pending ODR-failure diagnostics. for (auto &Merge : OdrMergeFailures) { // If we've already pointed out a specific problem with this class, don't @@ -8514,6 +8362,17 @@ void ASTReader::FinishedDeserializing() { --NumCurrentElementsDeserializing; if (NumCurrentElementsDeserializing == 0) { + // Propagate exception specification updates along redeclaration chains. + while (!PendingExceptionSpecUpdates.empty()) { + auto Updates = std::move(PendingExceptionSpecUpdates); + PendingExceptionSpecUpdates.clear(); + for (auto Update : Updates) { + auto *FPT = Update.second->getType()->castAs<FunctionProtoType>(); + SemaObj->UpdateExceptionSpec(Update.second, + FPT->getExtProtoInfo().ExceptionSpec); + } + } + diagnoseOdrViolations(); // We are not in recursive loading, so it's safe to pass the "interesting" @@ -8524,7 +8383,18 @@ void ASTReader::FinishedDeserializing() { } void ASTReader::pushExternalDeclIntoScope(NamedDecl *D, DeclarationName Name) { - D = D->getMostRecentDecl(); + if (IdentifierInfo *II = Name.getAsIdentifierInfo()) { + // Remove any fake results before adding any real ones. + auto It = PendingFakeLookupResults.find(II); + if (It != PendingFakeLookupResults.end()) { + for (auto *ND : PendingFakeLookupResults[II]) + SemaObj->IdResolver.RemoveDecl(ND); + // FIXME: this works around module+PCH performance issue. + // Rather than erase the result from the map, which is O(n), just clear + // the vector of NamedDecls. 
+ It->second.clear(); + } + } if (SemaObj->IdResolver.tryAddTopLevelDecl(D, Name) && SemaObj->TUScope) { SemaObj->TUScope->AddDecl(D); @@ -8562,8 +8432,7 @@ ASTReader::ASTReader(Preprocessor &PP, ASTContext &Context, StringRef isysroot, NumLexicalDeclContextsRead(0), TotalLexicalDeclContexts(0), NumVisibleDeclContextsRead(0), TotalVisibleDeclContexts(0), TotalModulesSizeInBits(0), NumCurrentElementsDeserializing(0), - PassingDeclsToConsumer(false), NumCXXBaseSpecifiersLoaded(0), - ReadingKind(Read_None) { + PassingDeclsToConsumer(false), ReadingKind(Read_None) { SourceMgr.setExternalSLocEntrySource(this); } diff --git a/contrib/llvm/tools/clang/lib/Serialization/ASTReaderDecl.cpp b/contrib/llvm/tools/clang/lib/Serialization/ASTReaderDecl.cpp index a783183..02273ed 100644 --- a/contrib/llvm/tools/clang/lib/Serialization/ASTReaderDecl.cpp +++ b/contrib/llvm/tools/clang/lib/Serialization/ASTReaderDecl.cpp @@ -66,7 +66,12 @@ namespace clang { serialization::DeclID ReadDeclID(const RecordData &R, unsigned &I) { return Reader.ReadDeclID(F, R, I); } - + + void ReadDeclIDList(SmallVectorImpl<DeclID> &IDs) { + for (unsigned I = 0, Size = Record[Idx++]; I != Size; ++I) + IDs.push_back(ReadDeclID(Record, Idx)); + } + Decl *ReadDecl(const RecordData &R, unsigned &I) { return Reader.ReadDecl(F, R, I); } @@ -103,11 +108,11 @@ namespace clang { return Reader.getSubmodule(readSubmoduleID(R, I)); } - void ReadCXXRecordDefinition(CXXRecordDecl *D); + void ReadCXXRecordDefinition(CXXRecordDecl *D, bool Update); void ReadCXXDefinitionData(struct CXXRecordDecl::DefinitionData &Data, const RecordData &R, unsigned &I); void MergeDefinitionData(CXXRecordDecl *D, - struct CXXRecordDecl::DefinitionData &NewDD); + struct CXXRecordDecl::DefinitionData &&NewDD); static NamedDecl *getAnonymousDeclForMerging(ASTReader &Reader, DeclContext *DC, @@ -118,43 +123,42 @@ namespace clang { /// \brief RAII class used to capture the first ID within a redeclaration /// chain and to introduce it into the list of pending redeclaration chains /// on destruction. - /// - /// The caller can choose not to introduce this ID into the list of pending - /// redeclaration chains by calling \c suppress(). 
class RedeclarableResult { ASTReader &Reader; GlobalDeclID FirstID; + Decl *MergeWith; mutable bool Owning; Decl::Kind DeclKind; - - void operator=(RedeclarableResult &) LLVM_DELETED_FUNCTION; - + + void operator=(RedeclarableResult &) = delete; + public: RedeclarableResult(ASTReader &Reader, GlobalDeclID FirstID, - Decl::Kind DeclKind) - : Reader(Reader), FirstID(FirstID), Owning(true), DeclKind(DeclKind) { } - - RedeclarableResult(const RedeclarableResult &Other) - : Reader(Other.Reader), FirstID(Other.FirstID), Owning(Other.Owning) , - DeclKind(Other.DeclKind) - { + Decl *MergeWith, Decl::Kind DeclKind) + : Reader(Reader), FirstID(FirstID), MergeWith(MergeWith), + Owning(true), DeclKind(DeclKind) {} + + RedeclarableResult(RedeclarableResult &&Other) + : Reader(Other.Reader), FirstID(Other.FirstID), + MergeWith(Other.MergeWith), Owning(Other.Owning), + DeclKind(Other.DeclKind) { Other.Owning = false; } ~RedeclarableResult() { - if (FirstID && Owning && isRedeclarableDeclKind(DeclKind) && - Reader.PendingDeclChainsKnown.insert(FirstID).second) - Reader.PendingDeclChains.push_back(FirstID); + if (FirstID && Owning && isRedeclarableDeclKind(DeclKind)) { + auto Canon = Reader.GetDecl(FirstID)->getCanonicalDecl(); + if (Reader.PendingDeclChainsKnown.insert(Canon).second) + Reader.PendingDeclChains.push_back(Canon); + } } - + /// \brief Retrieve the first ID. GlobalDeclID getFirstID() const { return FirstID; } - - /// \brief Do not introduce this declaration ID into the set of pending - /// declaration chains. - void suppress() { - Owning = false; - } + + /// \brief Get a known declaration that this should be merged with, if + /// any. + Decl *getKnownMergeTarget() const { return MergeWith; } }; /// \brief Class used to capture the result of searching for an existing @@ -171,7 +175,7 @@ namespace clang { unsigned AnonymousDeclNumber; IdentifierInfo *TypedefNameForLinkage; - void operator=(FindExistingResult&) LLVM_DELETED_FUNCTION; + void operator=(FindExistingResult&) = delete; public: FindExistingResult(ASTReader &Reader) @@ -205,6 +209,8 @@ namespace clang { operator T*() const { return dyn_cast_or_null<T>(Existing); } }; + static DeclContext *getPrimaryContextForMerging(ASTReader &Reader, + DeclContext *DC); FindExistingResult findExisting(NamedDecl *D); public: @@ -216,10 +222,17 @@ namespace clang { TypedefNameForLinkage(nullptr), HasPendingBody(false) {} template <typename DeclT> + static Decl *getMostRecentDeclImpl(Redeclarable<DeclT> *D); + static Decl *getMostRecentDeclImpl(...); + static Decl *getMostRecentDecl(Decl *D); + + template <typename DeclT> static void attachPreviousDeclImpl(ASTReader &Reader, - Redeclarable<DeclT> *D, Decl *Previous); + Redeclarable<DeclT> *D, Decl *Previous, + Decl *Canon); static void attachPreviousDeclImpl(ASTReader &Reader, ...); - static void attachPreviousDecl(ASTReader &Reader, Decl *D, Decl *Previous); + static void attachPreviousDecl(ASTReader &Reader, Decl *D, Decl *Previous, + Decl *Canon); template <typename DeclT> static void attachLatestDeclImpl(Redeclarable<DeclT> *D, Decl *Latest); @@ -390,9 +403,14 @@ void ASTDeclReader::Visit(Decl *D) { // FunctionDecl's body was written last after all other Stmts/Exprs. // We only read it if FD doesn't already have a body (e.g., from another // module). - // FIXME: Also consider = default and = delete. // FIXME: Can we diagnose ODR violations somehow? 
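
The RedeclarableResult rewrite above drops the copy constructor and suppress() in favor of move semantics: exactly one live result owns the job of queuing the pending redeclaration chain, and a move transfers that ownership. A reduced sketch of the one-shot ownership pattern:

#include <cstdio>
#include <utility>

class OneShot {
  int ID;
  bool Owning;

  void operator=(OneShot &) = delete;

public:
  explicit OneShot(int ID) : ID(ID), Owning(true) {}

  // Moving transfers the responsibility to run the pending action.
  OneShot(OneShot &&Other) : ID(Other.ID), Owning(Other.Owning) {
    Other.Owning = false;
  }

  ~OneShot() {
    if (Owning)
      std::printf("queueing pending chain for %d\n", ID);
  }
};

OneShot make() { return OneShot(5); }

int main() {
  OneShot A = make();       // ownership moves out of the temporary
  OneShot B = std::move(A); // and again; only B fires at scope exit
}
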
if (Record[Idx++]) { + if (auto *CD = dyn_cast<CXXConstructorDecl>(FD)) { + CD->NumCtorInitializers = Record[Idx++]; + if (CD->NumCtorInitializers) + CD->CtorInitializers = + Reader.ReadCXXCtorInitializersRef(F, Record, Idx); + } Reader.PendingBodies[FD] = GetCurrentCursorOffset(); HasPendingBody = true; } @@ -440,24 +458,28 @@ void ASTDeclReader::VisitDecl(Decl *D) { D->FromASTFile = true; D->setModulePrivate(Record[Idx++]); D->Hidden = D->isModulePrivate(); - + // Determine whether this declaration is part of a (sub)module. If so, it // may not yet be visible. if (unsigned SubmoduleID = readSubmoduleID(Record, Idx)) { // Store the owning submodule ID in the declaration. D->setOwningModuleID(SubmoduleID); - - // Module-private declarations are never visible, so there is no work to do. - if (!D->isModulePrivate()) { - if (Module *Owner = Reader.getSubmodule(SubmoduleID)) { - if (Owner->NameVisibility != Module::AllVisible) { - // The owning module is not visible. Mark this declaration as hidden. - D->Hidden = true; - - // Note that this declaration was hidden because its owning module is - // not yet visible. - Reader.HiddenNamesMap[Owner].HiddenDecls.push_back(D); - } + + if (D->Hidden) { + // Module-private declarations are never visible, so there is no work to do. + } else if (Reader.getContext().getLangOpts().ModulesLocalVisibility) { + // If local visibility is being tracked, this declaration will become + // hidden and visible as the owning module does. Inform Sema that this + // declaration might not be visible. + D->Hidden = true; + } else if (Module *Owner = Reader.getSubmodule(SubmoduleID)) { + if (Owner->NameVisibility != Module::AllVisible) { + // The owning module is not visible. Mark this declaration as hidden. + D->Hidden = true; + + // Note that this declaration was hidden because its owning module is + // not yet visible. 
+ Reader.HiddenNamesMap[Owner].push_back(D); } } } @@ -470,8 +492,7 @@ void ASTDeclReader::VisitTranslationUnitDecl(TranslationUnitDecl *TU) { void ASTDeclReader::VisitNamedDecl(NamedDecl *ND) { VisitDecl(ND); ND->setDeclName(Reader.ReadDeclarationName(F, Record, Idx)); - if (needsAnonymousDeclarationNumber(ND)) - AnonymousDeclNumber = Record[Idx++]; + AnonymousDeclNumber = Record[Idx++]; } void ASTDeclReader::VisitTypeDecl(TypeDecl *TD) { @@ -981,8 +1002,9 @@ void ASTDeclReader::VisitObjCImplementationDecl(ObjCImplementationDecl *D) { D->setIvarRBraceLoc(ReadSourceLocation(Record, Idx)); D->setHasNonZeroConstructors(Record[Idx++]); D->setHasDestructors(Record[Idx++]); - std::tie(D->IvarInitializers, D->NumIvarInitializers) = - Reader.ReadCXXCtorInitializers(F, Record, Idx); + D->NumIvarInitializers = Record[Idx++]; + if (D->NumIvarInitializers) + D->IvarInitializers = Reader.ReadCXXCtorInitializersRef(F, Record, Idx); } @@ -1041,13 +1063,15 @@ ASTDeclReader::RedeclarableResult ASTDeclReader::VisitVarDeclImpl(VarDecl *VD) { VD->VarDeclBits.SClass = (StorageClass)Record[Idx++]; VD->VarDeclBits.TSCSpec = Record[Idx++]; VD->VarDeclBits.InitStyle = Record[Idx++]; - VD->VarDeclBits.ExceptionVar = Record[Idx++]; - VD->VarDeclBits.NRVOVariable = Record[Idx++]; - VD->VarDeclBits.CXXForRangeDecl = Record[Idx++]; - VD->VarDeclBits.ARCPseudoStrong = Record[Idx++]; - VD->VarDeclBits.IsConstexpr = Record[Idx++]; - VD->VarDeclBits.IsInitCapture = Record[Idx++]; - VD->VarDeclBits.PreviousDeclInSameBlockScope = Record[Idx++]; + if (!isa<ParmVarDecl>(VD)) { + VD->NonParmVarDeclBits.ExceptionVar = Record[Idx++]; + VD->NonParmVarDeclBits.NRVOVariable = Record[Idx++]; + VD->NonParmVarDeclBits.CXXForRangeDecl = Record[Idx++]; + VD->NonParmVarDeclBits.ARCPseudoStrong = Record[Idx++]; + VD->NonParmVarDeclBits.IsConstexpr = Record[Idx++]; + VD->NonParmVarDeclBits.IsInitCapture = Record[Idx++]; + VD->NonParmVarDeclBits.PreviousDeclInSameBlockScope = Record[Idx++]; + } Linkage VarLinkage = Linkage(Record[Idx++]); VD->setCachedLinkage(VarLinkage); @@ -1189,13 +1213,13 @@ void ASTDeclReader::VisitNamespaceDecl(NamespaceDecl *D) { D->LocStart = ReadSourceLocation(Record, Idx); D->RBraceLoc = ReadSourceLocation(Record, Idx); + // Defer loading the anonymous namespace until we've finished merging + // this namespace; loading it might load a later declaration of the + // same namespace, and we have an invariant that older declarations + // get merged before newer ones try to merge. + GlobalDeclID AnonNamespace = 0; if (Redecl.getFirstID() == ThisDeclID) { - // Each module has its own anonymous namespace, which is disjoint from - // any other module's anonymous namespaces, so don't attach the anonymous - // namespace at all. - NamespaceDecl *Anon = ReadDeclAs<NamespaceDecl>(Record, Idx); - if (F.Kind != MK_ImplicitModule && F.Kind != MK_ExplicitModule) - D->setAnonymousNamespace(Anon); + AnonNamespace = ReadDeclID(Record, Idx); } else { // Link this namespace back to the first declaration, which has already // been deserialized. @@ -1203,6 +1227,15 @@ void ASTDeclReader::VisitNamespaceDecl(NamespaceDecl *D) { } mergeRedeclarable(D, Redecl); + + if (AnonNamespace) { + // Each module has its own anonymous namespace, which is disjoint from + // any other module's anonymous namespaces, so don't attach the anonymous + // namespace at all. 
+ NamespaceDecl *Anon = cast<NamespaceDecl>(Reader.GetDecl(AnonNamespace)); + if (F.Kind != MK_ImplicitModule && F.Kind != MK_ExplicitModule) + D->setAnonymousNamespace(Anon); + } } void ASTDeclReader::VisitNamespaceAliasDecl(NamespaceAliasDecl *D) { @@ -1353,7 +1386,7 @@ void ASTDeclReader::ReadCXXDefinitionData( } void ASTDeclReader::MergeDefinitionData( - CXXRecordDecl *D, struct CXXRecordDecl::DefinitionData &MergeDD) { + CXXRecordDecl *D, struct CXXRecordDecl::DefinitionData &&MergeDD) { assert(D->DefinitionData.getNotUpdated() && "merging class definition into non-definition"); auto &DD = *D->DefinitionData.getNotUpdated(); @@ -1364,14 +1397,45 @@ void ASTDeclReader::MergeDefinitionData( // FIXME: We only need to do this if the merged definition declares members // that this definition did not declare, or if it defines members that this // definition did not define. - if (MergeDD.DeclaredSpecialMembers && DD.Definition != MergeDD.Definition) { + if (DD.Definition != MergeDD.Definition) { Reader.MergedLookups[DD.Definition].push_back(MergeDD.Definition); DD.Definition->setHasExternalVisibleStorage(); + + if (DD.Definition->isHidden()) { + // If MergeDD is visible or becomes visible, make the definition visible. + if (!MergeDD.Definition->isHidden()) + DD.Definition->Hidden = false; + else if (Reader.getContext().getLangOpts().ModulesLocalVisibility) { + Reader.getContext().mergeDefinitionIntoModule( + DD.Definition, MergeDD.Definition->getImportedOwningModule(), + /*NotifyListeners*/ false); + Reader.PendingMergedDefinitionsToDeduplicate.insert(DD.Definition); + } else { + auto SubmoduleID = MergeDD.Definition->getOwningModuleID(); + assert(SubmoduleID && "hidden definition in no module"); + Reader.HiddenNamesMap[Reader.getSubmodule(SubmoduleID)].push_back( + DD.Definition); + } + } + } + + auto PFDI = Reader.PendingFakeDefinitionData.find(&DD); + if (PFDI != Reader.PendingFakeDefinitionData.end() && + PFDI->second == ASTReader::PendingFakeDefinitionKind::Fake) { + // We faked up this definition data because we found a class for which we'd + // not yet loaded the definition. Replace it with the real thing now. + assert(!DD.IsLambda && !MergeDD.IsLambda && "faked up lambda definition?"); + PFDI->second = ASTReader::PendingFakeDefinitionKind::FakeLoaded; + + // Don't change which declaration is the definition; that is required + // to be invariant once we select it. + auto *Def = DD.Definition; + DD = std::move(MergeDD); + DD.Definition = Def; + return; } // FIXME: Move this out into a .def file? - // FIXME: Issue a diagnostic on a mismatched MATCH_FIELD, rather than - // asserting; this can happen in the case of an ODR violation. bool DetectedOdrViolation = false; #define OR_FIELD(Field) DD.Field |= MergeDD.Field; #define MATCH_FIELD(Field) \ @@ -1442,7 +1506,7 @@ void ASTDeclReader::MergeDefinitionData( Reader.PendingOdrMergeFailures[DD.Definition].push_back(MergeDD.Definition); } -void ASTDeclReader::ReadCXXRecordDefinition(CXXRecordDecl *D) { +void ASTDeclReader::ReadCXXRecordDefinition(CXXRecordDecl *D, bool Update) { struct CXXRecordDecl::DefinitionData *DD; ASTContext &C = Reader.getContext(); @@ -1457,19 +1521,29 @@ void ASTDeclReader::ReadCXXRecordDefinition(CXXRecordDecl *D) { ReadCXXDefinitionData(*DD, Record, Idx); - // If we're reading an update record, we might already have a definition for - // this record. If so, just merge into it. - if (D->DefinitionData.getNotUpdated()) { - MergeDefinitionData(D, *DD); + // We might already have a definition for this record. 
This can happen either + // because we're reading an update record, or because we've already done some + // merging. Either way, just merge into it. + CXXRecordDecl *Canon = D->getCanonicalDecl(); + if (auto *CanonDD = Canon->DefinitionData.getNotUpdated()) { + if (CanonDD->Definition != DD->Definition) + Reader.MergedDeclContexts.insert( + std::make_pair(DD->Definition, CanonDD->Definition)); + MergeDefinitionData(Canon, std::move(*DD)); + D->DefinitionData = Canon->DefinitionData; return; } // Propagate the DefinitionData pointer to the canonical declaration, so // that all other deserialized declarations will see it. - CXXRecordDecl *Canon = D->getCanonicalDecl(); if (Canon == D) { D->DefinitionData = DD; D->IsCompleteDefinition = true; + + // If this is an update record, we can have redeclarations already. Make a + // note that we need to propagate the DefinitionData pointer onto them. + if (Update) + Reader.PendingDefinitions.insert(D); } else if (auto *CanonDD = Canon->DefinitionData.getNotUpdated()) { // We have already deserialized a definition of this record. This // definition is no longer really a definition. Note that the pre-existing @@ -1478,7 +1552,7 @@ void ASTDeclReader::ReadCXXRecordDefinition(CXXRecordDecl *D) { std::make_pair(D, CanonDD->Definition)); D->DefinitionData = Canon->DefinitionData; D->IsCompleteDefinition = false; - MergeDefinitionData(D, *DD); + MergeDefinitionData(D, std::move(*DD)); } else { Canon->DefinitionData = DD; D->DefinitionData = Canon->DefinitionData; @@ -1535,7 +1609,7 @@ ASTDeclReader::VisitCXXRecordDeclImpl(CXXRecordDecl *D) { bool WasDefinition = Record[Idx++]; if (WasDefinition) - ReadCXXRecordDefinition(D); + ReadCXXRecordDefinition(D, /*Update*/false); else // Propagate DefinitionData pointer from the canonical declaration. D->DefinitionData = D->getCanonicalDecl()->DefinitionData; @@ -1576,17 +1650,20 @@ void ASTDeclReader::VisitCXXConstructorDecl(CXXConstructorDecl *D) { VisitCXXMethodDecl(D); if (auto *CD = ReadDeclAs<CXXConstructorDecl>(Record, Idx)) - D->setInheritedConstructor(CD); + if (D->isCanonicalDecl()) + D->setInheritedConstructor(CD->getCanonicalDecl()); D->IsExplicitSpecified = Record[Idx++]; - // FIXME: We should defer loading this until we need the constructor's body. - std::tie(D->CtorInitializers, D->NumCtorInitializers) = - Reader.ReadCXXCtorInitializers(F, Record, Idx); } void ASTDeclReader::VisitCXXDestructorDecl(CXXDestructorDecl *D) { VisitCXXMethodDecl(D); - D->OperatorDelete = ReadDeclAs<FunctionDecl>(Record, Idx); + if (auto *OperatorDelete = ReadDeclAs<FunctionDecl>(Record, Idx)) { + auto *Canon = cast<CXXDestructorDecl>(D->getCanonicalDecl()); + // FIXME: Check consistency if we have an old and new operator delete. 
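In ReadCXXRecordDefinition above, a second definition for an already-defined class is merged into the canonical declaration, and every redeclaration ends up sharing one DefinitionData pointer. A standalone sketch of that pointer-publishing idea, with hypothetical types:

#include <cassert>

// All redeclarations of a class share one definition-data object,
// published through the canonical declaration so that declarations
// deserialized later can simply copy the pointer.
struct DefData {
  bool Complete = true;
};

struct RecordDecl {
  RecordDecl *Canon;       // canonical (first) declaration
  DefData *Data = nullptr; // shared definition data
  RecordDecl() : Canon(this) {}
  explicit RecordDecl(RecordDecl *C) : Canon(C) {}

  void readDefinition(DefData *DD) {
    if (Canon->Data) {
      // Someone else already provided a definition; merge into and
      // share the existing one rather than installing a second.
      Data = Canon->Data;
      return;
    }
    Canon->Data = DD; // publish through the canonical declaration
    Data = DD;
  }
};

int main() {
  RecordDecl First;         // canonical
  RecordDecl Later(&First); // redeclaration from another module
  DefData A, B;
  Later.readDefinition(&A); // first definition wins
  First.readDefinition(&B); // merged into the existing one
  assert(First.Data == &A && Later.Data == &A);
}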
+ if (!Canon->OperatorDelete) + Canon->OperatorDelete = OperatorDelete; + } } void ASTDeclReader::VisitCXXConversionDecl(CXXConversionDecl *D) { @@ -1690,36 +1767,34 @@ ASTDeclReader::VisitRedeclarableTemplateDecl(RedeclarableTemplateDecl *D) { return Redecl; } +static DeclID *newDeclIDList(ASTContext &Context, DeclID *Old, + SmallVectorImpl<DeclID> &IDs) { + assert(!IDs.empty() && "no IDs to add to list"); + if (Old) { + IDs.insert(IDs.end(), Old + 1, Old + 1 + Old[0]); + std::sort(IDs.begin(), IDs.end()); + IDs.erase(std::unique(IDs.begin(), IDs.end()), IDs.end()); + } + + auto *Result = new (Context) DeclID[1 + IDs.size()]; + *Result = IDs.size(); + std::copy(IDs.begin(), IDs.end(), Result + 1); + return Result; +} + void ASTDeclReader::VisitClassTemplateDecl(ClassTemplateDecl *D) { RedeclarableResult Redecl = VisitRedeclarableTemplateDecl(D); if (ThisDeclID == Redecl.getFirstID()) { // This ClassTemplateDecl owns a CommonPtr; read it to keep track of all of // the specializations. - SmallVector<serialization::DeclID, 2> SpecIDs; - SpecIDs.push_back(0); - - // Specializations. - unsigned Size = Record[Idx++]; - SpecIDs[0] += Size; - for (unsigned I = 0; I != Size; ++I) - SpecIDs.push_back(ReadDeclID(Record, Idx)); - - // Partial specializations. - Size = Record[Idx++]; - SpecIDs[0] += Size; - for (unsigned I = 0; I != Size; ++I) - SpecIDs.push_back(ReadDeclID(Record, Idx)); - - ClassTemplateDecl::Common *CommonPtr = D->getCommonPtr(); - if (SpecIDs[0]) { - typedef serialization::DeclID DeclID; - - // FIXME: Append specializations! - CommonPtr->LazySpecializations - = new (Reader.getContext()) DeclID [SpecIDs.size()]; - memcpy(CommonPtr->LazySpecializations, SpecIDs.data(), - SpecIDs.size() * sizeof(DeclID)); + SmallVector<serialization::DeclID, 32> SpecIDs; + ReadDeclIDList(SpecIDs); + + if (!SpecIDs.empty()) { + auto *CommonPtr = D->getCommonPtr(); + CommonPtr->LazySpecializations = newDeclIDList( + Reader.getContext(), CommonPtr->LazySpecializations, SpecIDs); } } @@ -1741,30 +1816,13 @@ void ASTDeclReader::VisitVarTemplateDecl(VarTemplateDecl *D) { if (ThisDeclID == Redecl.getFirstID()) { // This VarTemplateDecl owns a CommonPtr; read it to keep track of all of // the specializations. - SmallVector<serialization::DeclID, 2> SpecIDs; - SpecIDs.push_back(0); - - // Specializations. - unsigned Size = Record[Idx++]; - SpecIDs[0] += Size; - for (unsigned I = 0; I != Size; ++I) - SpecIDs.push_back(ReadDeclID(Record, Idx)); - - // Partial specializations. - Size = Record[Idx++]; - SpecIDs[0] += Size; - for (unsigned I = 0; I != Size; ++I) - SpecIDs.push_back(ReadDeclID(Record, Idx)); - - VarTemplateDecl::Common *CommonPtr = D->getCommonPtr(); - if (SpecIDs[0]) { - typedef serialization::DeclID DeclID; + SmallVector<serialization::DeclID, 32> SpecIDs; + ReadDeclIDList(SpecIDs); - // FIXME: Append specializations! - CommonPtr->LazySpecializations = - new (Reader.getContext()) DeclID[SpecIDs.size()]; - memcpy(CommonPtr->LazySpecializations, SpecIDs.data(), - SpecIDs.size() * sizeof(DeclID)); + if (!SpecIDs.empty()) { + auto *CommonPtr = D->getCommonPtr(); + CommonPtr->LazySpecializations = newDeclIDList( + Reader.getContext(), CommonPtr->LazySpecializations, SpecIDs); } } } @@ -1823,7 +1881,7 @@ ASTDeclReader::VisitClassTemplateSpecializationDeclImpl( // definition. 
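newDeclIDList above stores lazy specialization IDs as a length-prefixed array: element 0 holds the count, and re-reading a template folds the old list in, then sorts and uniques so duplicate update records cannot double-register a specialization. A self-contained sketch of the same scheme, using std::vector in place of the ASTContext allocation:

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

using DeclID = uint32_t;

// Element 0 stores the count; merging appends the old payload, then
// sort + unique removes any duplicates introduced across updates.
static std::vector<DeclID> newIDList(const std::vector<DeclID> *Old,
                                     std::vector<DeclID> IDs) {
  if (Old)
    IDs.insert(IDs.end(), Old->begin() + 1, Old->begin() + 1 + (*Old)[0]);
  std::sort(IDs.begin(), IDs.end());
  IDs.erase(std::unique(IDs.begin(), IDs.end()), IDs.end());
  std::vector<DeclID> Result(1 + IDs.size());
  Result[0] = IDs.size();
  std::copy(IDs.begin(), IDs.end(), Result.begin() + 1);
  return Result;
}

int main() {
  std::vector<DeclID> First = newIDList(nullptr, {7, 3, 3});
  assert(First[0] == 2 && First[1] == 3 && First[2] == 7);
  std::vector<DeclID> Second = newIDList(&First, {5, 3});
  assert(Second[0] == 3); // payload is {3, 5, 7}
}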
if (auto *DDD = D->DefinitionData.getNotUpdated()) { if (auto *CanonDD = CanonSpec->DefinitionData.getNotUpdated()) { - MergeDefinitionData(CanonSpec, *DDD); + MergeDefinitionData(CanonSpec, std::move(*DDD)); Reader.PendingDefinitions.erase(D); Reader.MergedDeclContexts.insert( std::make_pair(D, CanonDD->Definition)); @@ -1876,17 +1934,13 @@ void ASTDeclReader::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) { if (ThisDeclID == Redecl.getFirstID()) { // This FunctionTemplateDecl owns a CommonPtr; read it. + SmallVector<serialization::DeclID, 32> SpecIDs; + ReadDeclIDList(SpecIDs); - // Read the function specialization declaration IDs. The specializations - // themselves will be loaded if they're needed. - if (unsigned NumSpecs = Record[Idx++]) { - // FIXME: Append specializations! - FunctionTemplateDecl::Common *CommonPtr = D->getCommonPtr(); - CommonPtr->LazySpecializations = new (Reader.getContext()) - serialization::DeclID[NumSpecs + 1]; - CommonPtr->LazySpecializations[0] = NumSpecs; - for (unsigned I = 0; I != NumSpecs; ++I) - CommonPtr->LazySpecializations[I + 1] = ReadDeclID(Record, Idx); + if (!SpecIDs.empty()) { + auto *CommonPtr = D->getCommonPtr(); + CommonPtr->LazySpecializations = newDeclIDList( + Reader.getContext(), CommonPtr->LazySpecializations, SpecIDs); } } } @@ -2049,15 +2103,25 @@ ASTDeclReader::VisitDeclContext(DeclContext *DC) { } template <typename T> -ASTDeclReader::RedeclarableResult +ASTDeclReader::RedeclarableResult ASTDeclReader::VisitRedeclarable(Redeclarable<T> *D) { DeclID FirstDeclID = ReadDeclID(Record, Idx); - + Decl *MergeWith = nullptr; + // 0 indicates that this declaration was the only declaration of its entity, // and is used for space optimization. if (FirstDeclID == 0) FirstDeclID = ThisDeclID; - + else if (unsigned N = Record[Idx++]) { + // We have some declarations that must be before us in our redeclaration + // chain. Read them now, and remember that we ought to merge with one of + // them. + // FIXME: Provide a known merge target to the second and subsequent such + // declaration. + for (unsigned I = 0; I != N; ++I) + MergeWith = ReadDecl(Record, Idx/*, MergeWith*/); + } + T *FirstDecl = cast_or_null<T>(Reader.GetDecl(FirstDeclID)); if (FirstDecl != D) { // We delay loading of the redeclaration chain to avoid deeply nested calls. @@ -2065,6 +2129,7 @@ ASTDeclReader::VisitRedeclarable(Redeclarable<T> *D) { // which is the one that matters and mark the real previous DeclID to be // loaded & attached later on. D->RedeclLink = Redeclarable<T>::PreviousDeclLink(FirstDecl); + D->First = FirstDecl->getCanonicalDecl(); } // Note that this declaration has been deserialized. @@ -2072,7 +2137,7 @@ ASTDeclReader::VisitRedeclarable(Redeclarable<T> *D) { // The result structure takes care to note that we need to load the // other declaration chains for this ID. - return RedeclarableResult(Reader, FirstDeclID, + return RedeclarableResult(Reader, FirstDeclID, MergeWith, static_cast<T *>(D)->getKind()); } @@ -2083,23 +2148,19 @@ void ASTDeclReader::mergeRedeclarable(Redeclarable<T> *DBase, RedeclarableResult &Redecl, DeclID TemplatePatternID) { T *D = static_cast<T*>(DBase); - T *DCanon = D->getCanonicalDecl(); - if (D != DCanon && - // IDs < NUM_PREDEF_DECL_IDS are not loaded from an AST file. 
- Redecl.getFirstID() >= NUM_PREDEF_DECL_IDS && - (!Reader.getContext().getLangOpts().Modules || - Reader.getOwningModuleFile(DCanon) == Reader.getOwningModuleFile(D))) { - // All redeclarations between this declaration and its originally-canonical - // declaration get pulled in when we load DCanon; we don't need to - // perform any more merging now. - Redecl.suppress(); - } // If modules are not available, there is no reason to perform this merge. if (!Reader.getContext().getLangOpts().Modules) return; - if (FindExistingResult ExistingRes = findExisting(D)) + // If we're not the canonical declaration, we don't need to merge. + if (!DBase->isFirstDecl()) + return; + + if (auto *Existing = Redecl.getKnownMergeTarget()) + // We already know of an existing declaration we should merge with. + mergeRedeclarable(D, cast<T>(Existing), Redecl, TemplatePatternID); + else if (FindExistingResult ExistingRes = findExisting(D)) if (T *Existing = ExistingRes) mergeRedeclarable(D, Existing, Redecl, TemplatePatternID); } @@ -2120,7 +2181,8 @@ void ASTDeclReader::mergeTemplatePattern(RedeclarableTemplateDecl *D, auto *DPattern = D->getTemplatedDecl(); auto *ExistingPattern = Existing->getTemplatedDecl(); RedeclarableResult Result(Reader, DPattern->getCanonicalDecl()->getGlobalID(), - DPattern->getKind()); + /*MergeWith*/ExistingPattern, DPattern->getKind()); + if (auto *DClass = dyn_cast<CXXRecordDecl>(DPattern)) { // Merge with any existing definition. // FIXME: This is duplicated in several places. Refactor. @@ -2128,13 +2190,14 @@ void ASTDeclReader::mergeTemplatePattern(RedeclarableTemplateDecl *D, cast<CXXRecordDecl>(ExistingPattern)->getCanonicalDecl(); if (auto *DDD = DClass->DefinitionData.getNotUpdated()) { if (auto *ExistingDD = ExistingClass->DefinitionData.getNotUpdated()) { - MergeDefinitionData(ExistingClass, *DDD); + MergeDefinitionData(ExistingClass, std::move(*DDD)); Reader.PendingDefinitions.erase(DClass); Reader.MergedDeclContexts.insert( std::make_pair(DClass, ExistingDD->Definition)); DClass->IsCompleteDefinition = false; } else { ExistingClass->DefinitionData = DClass->DefinitionData; + Reader.PendingDefinitions.insert(DClass); } } DClass->DefinitionData = ExistingClass->DefinitionData; @@ -2163,14 +2226,18 @@ void ASTDeclReader::mergeRedeclarable(Redeclarable<T> *DBase, T *Existing, T *ExistingCanon = Existing->getCanonicalDecl(); T *DCanon = D->getCanonicalDecl(); if (ExistingCanon != DCanon) { - assert(DCanon->getGlobalID() == Redecl.getFirstID()); + assert(DCanon->getGlobalID() == Redecl.getFirstID() && + "already merged this declaration"); // Have our redeclaration link point back at the canonical declaration // of the existing declaration, so that this declaration has the // appropriate canonical declaration. D->RedeclLink = Redeclarable<T>::PreviousDeclLink(ExistingCanon); + D->First = ExistingCanon; // When we merge a namespace, update its pointer to the first namespace. + // We cannot have loaded any redeclarations of this declaration yet, so + // there's nothing else that needs to be updated. if (auto *Namespace = dyn_cast<NamespaceDecl>(D)) Namespace->AnonOrFirstNamespaceAndInline.setPointer( assert_cast<NamespaceDecl*>(ExistingCanon)); @@ -2181,14 +2248,11 @@ void ASTDeclReader::mergeRedeclarable(Redeclarable<T> *DBase, T *Existing, DTemplate, assert_cast<RedeclarableTemplateDecl*>(ExistingCanon), TemplatePatternID); - // If this declaration was the canonical declaration, make a note of - // that. 
We accept the linear algorithm here because the number of - // unique canonical declarations of an entity should always be tiny. + // If this declaration was the canonical declaration, make a note of that. if (DCanon == D) { - SmallVectorImpl<DeclID> &Merged = Reader.MergedDecls[ExistingCanon]; - if (std::find(Merged.begin(), Merged.end(), Redecl.getFirstID()) - == Merged.end()) - Merged.push_back(Redecl.getFirstID()); + Reader.MergedDecls[ExistingCanon].push_back(Redecl.getFirstID()); + if (Reader.PendingDeclChainsKnown.insert(ExistingCanon).second) + Reader.PendingDeclChains.push_back(ExistingCanon); } } } @@ -2529,42 +2593,71 @@ static bool isSameEntity(NamedDecl *X, NamedDecl *Y) { /// Find the context in which we should search for previous declarations when /// looking for declarations to merge. -static DeclContext *getPrimaryContextForMerging(DeclContext *DC) { +DeclContext *ASTDeclReader::getPrimaryContextForMerging(ASTReader &Reader, + DeclContext *DC) { if (NamespaceDecl *ND = dyn_cast<NamespaceDecl>(DC)) return ND->getOriginalNamespace(); - // There is one tricky case here: if DC is a class with no definition, then - // we're merging a declaration whose definition is added by an update record, - // but we've not yet loaded that update record. In this case, we use the - // canonical declaration for merging until we get a real definition. - // FIXME: When we add a definition, we may need to move the partial lookup - // information from the canonical declaration onto the chosen definition. - if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(DC)) - return RD->getPrimaryContext(); + if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(DC)) { + // Try to dig out the definition. + auto *DD = RD->DefinitionData.getNotUpdated(); + if (!DD) + DD = RD->getCanonicalDecl()->DefinitionData.getNotUpdated(); + + // If there's no definition yet, then DC's definition is added by an update + // record, but we've not yet loaded that update record. In this case, we + // commit to DC being the canonical definition now, and will fix this when + // we load the update record. + if (!DD) { + DD = new (Reader.Context) struct CXXRecordDecl::DefinitionData(RD); + RD->IsCompleteDefinition = true; + RD->DefinitionData = DD; + RD->getCanonicalDecl()->DefinitionData = DD; + + // Track that we did this horrible thing so that we can fix it later. + Reader.PendingFakeDefinitionData.insert( + std::make_pair(DD, ASTReader::PendingFakeDefinitionKind::Fake)); + } + + return DD->Definition; + } if (EnumDecl *ED = dyn_cast<EnumDecl>(DC)) return ED->getASTContext().getLangOpts().CPlusPlus? ED->getDefinition() : nullptr; + // We can see the TU here only if we have no Sema object. In that case, + // there's no TU scope to look in, so using the DC alone is sufficient. + if (auto *TU = dyn_cast<TranslationUnitDecl>(DC)) + return TU; + return nullptr; } ASTDeclReader::FindExistingResult::~FindExistingResult() { + // Record that we had a typedef name for linkage whether or not we merge + // with that declaration. 
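The PendingDeclChains bookkeeping above relies on a set-plus-vector idiom: insert().second is true only on the first insertion, so each canonical declaration is queued for chain loading exactly once no matter how many merges rediscover it. A minimal generic sketch:

#include <cassert>
#include <set>
#include <vector>

// insert().second reports whether the element was newly added, which
// makes the vector a duplicate-free worklist in FIFO discovery order.
template <typename T> struct UniqueWorklist {
  std::set<T> Known;
  std::vector<T> Work;
  void enqueue(const T &V) {
    if (Known.insert(V).second)
      Work.push_back(V);
  }
};

int main() {
  UniqueWorklist<int> W;
  W.enqueue(1);
  W.enqueue(2);
  W.enqueue(1); // rediscovered; not queued again
  assert(W.Work.size() == 2);
}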
+ if (TypedefNameForLinkage) { + DeclContext *DC = New->getDeclContext()->getRedeclContext(); + Reader.ImportedTypedefNamesForLinkage.insert( + std::make_pair(std::make_pair(DC, TypedefNameForLinkage), New)); + return; + } + if (!AddResult || Existing) return; DeclarationName Name = New->getDeclName(); DeclContext *DC = New->getDeclContext()->getRedeclContext(); - if (TypedefNameForLinkage) { - Reader.ImportedTypedefNamesForLinkage.insert( - std::make_pair(std::make_pair(DC, TypedefNameForLinkage), New)); - } else if (!Name) { - assert(needsAnonymousDeclarationNumber(New)); + if (needsAnonymousDeclarationNumber(New)) { setAnonymousDeclForMerging(Reader, New->getLexicalDeclContext(), AnonymousDeclNumber, New); - } else if (DC->isTranslationUnit() && Reader.SemaObj) { - Reader.SemaObj->IdResolver.tryAddTopLevelDecl(New, Name); - } else if (DeclContext *MergeDC = getPrimaryContextForMerging(DC)) { + } else if (DC->isTranslationUnit() && Reader.SemaObj && + !Reader.getContext().getLangOpts().CPlusPlus) { + if (Reader.SemaObj->IdResolver.tryAddTopLevelDecl(New, Name)) + Reader.PendingFakeLookupResults[Name.getAsIdentifierInfo()] + .push_back(New); + } else if (DeclContext *MergeDC = getPrimaryContextForMerging(Reader, DC)) { // Add the declaration to its redeclaration context so later merging // lookups will find it. MergeDC->makeDeclVisibleInContextImpl(New, /*Internal*/true); @@ -2582,12 +2675,11 @@ static NamedDecl *getDeclForMerging(NamedDecl *Found, // If we found a typedef declaration that gives a name to some other // declaration, then we want that inner declaration. Declarations from // AST files are handled via ImportedTypedefNamesForLinkage. - if (Found->isFromASTFile()) return 0; - if (auto *TND = dyn_cast<TypedefNameDecl>(Found)) { - if (auto *TT = TND->getTypeSourceInfo()->getType()->getAs<TagType>()) - if (TT->getDecl()->getTypedefNameForAnonDecl() == TND) - return TT->getDecl(); - } + if (Found->isFromASTFile()) + return 0; + + if (auto *TND = dyn_cast<TypedefNameDecl>(Found)) + return TND->getAnonDeclWithTypedefName(); return 0; } @@ -2608,17 +2700,12 @@ NamedDecl *ASTDeclReader::getAnonymousDeclForMerging(ASTReader &Reader, // If this is the first time, but we have parsed a declaration of the context, // build the anonymous declaration list from the parsed declaration. if (!cast<Decl>(DC)->isFromASTFile()) { - unsigned Index = 0; - for (Decl *LexicalD : DC->decls()) { - auto *ND = dyn_cast<NamedDecl>(LexicalD); - if (!ND || !needsAnonymousDeclarationNumber(ND)) - continue; - if (Previous.size() == Index) + numberAnonymousDeclsWithin(DC, [&](NamedDecl *ND, unsigned Number) { + if (Previous.size() == Number) Previous.push_back(cast<NamedDecl>(ND->getCanonicalDecl())); else - Previous[Index] = cast<NamedDecl>(ND->getCanonicalDecl()); - ++Index; - } + Previous[Number] = cast<NamedDecl>(ND->getCanonicalDecl()); + }); } return Index < Previous.size() ? Previous[Index] : nullptr; @@ -2646,8 +2733,6 @@ ASTDeclReader::FindExistingResult ASTDeclReader::findExisting(NamedDecl *D) { // unmergeable contexts. FindExistingResult Result(Reader, D, /*Existing=*/nullptr, AnonymousDeclNumber, TypedefNameForLinkage); - // FIXME: We may still need to pull in the redeclaration chain; there can - // be redeclarations via 'decltype'. Result.suppress(); return Result; } @@ -2667,16 +2752,16 @@ ASTDeclReader::FindExistingResult ASTDeclReader::findExisting(NamedDecl *D) { // was not imported. 
} - if (!Name) { + if (needsAnonymousDeclarationNumber(D)) { // This is an anonymous declaration that we may need to merge. Look it up // in its context by number. - assert(needsAnonymousDeclarationNumber(D)); if (auto *Existing = getAnonymousDeclForMerging( Reader, D->getLexicalDeclContext(), AnonymousDeclNumber)) if (isSameEntity(Existing, D)) return FindExistingResult(Reader, D, Existing, AnonymousDeclNumber, TypedefNameForLinkage); - } else if (DC->isTranslationUnit() && Reader.SemaObj) { + } else if (DC->isTranslationUnit() && Reader.SemaObj && + !Reader.getContext().getLangOpts().CPlusPlus) { IdentifierResolver &IdResolver = Reader.SemaObj->IdResolver; // Temporarily consider the identifier to be up-to-date. We don't want to @@ -2710,7 +2795,7 @@ ASTDeclReader::FindExistingResult ASTDeclReader::findExisting(NamedDecl *D) { return FindExistingResult(Reader, D, Existing, AnonymousDeclNumber, TypedefNameForLinkage); } - } else if (DeclContext *MergeDC = getPrimaryContextForMerging(DC)) { + } else if (DeclContext *MergeDC = getPrimaryContextForMerging(Reader, DC)) { DeclContext::lookup_result R = MergeDC->noload_lookup(Name); for (DeclContext::lookup_iterator I = R.begin(), E = R.end(); I != E; ++I) { if (NamedDecl *Existing = getDeclForMerging(*I, TypedefNameForLinkage)) @@ -2738,20 +2823,47 @@ ASTDeclReader::FindExistingResult ASTDeclReader::findExisting(NamedDecl *D) { } template<typename DeclT> +Decl *ASTDeclReader::getMostRecentDeclImpl(Redeclarable<DeclT> *D) { + return D->RedeclLink.getLatestNotUpdated(); +} +Decl *ASTDeclReader::getMostRecentDeclImpl(...) { + llvm_unreachable("getMostRecentDecl on non-redeclarable declaration"); +} + +Decl *ASTDeclReader::getMostRecentDecl(Decl *D) { + assert(D); + + switch (D->getKind()) { +#define ABSTRACT_DECL(TYPE) +#define DECL(TYPE, BASE) \ + case Decl::TYPE: \ + return getMostRecentDeclImpl(cast<TYPE##Decl>(D)); +#include "clang/AST/DeclNodes.inc" + } + llvm_unreachable("unknown decl kind"); +} + +Decl *ASTReader::getMostRecentExistingDecl(Decl *D) { + return ASTDeclReader::getMostRecentDecl(D->getCanonicalDecl()); +} + +template<typename DeclT> void ASTDeclReader::attachPreviousDeclImpl(ASTReader &Reader, Redeclarable<DeclT> *D, - Decl *Previous) { + Decl *Previous, Decl *Canon) { D->RedeclLink.setPrevious(cast<DeclT>(Previous)); + D->First = cast<DeclT>(Previous)->First; } namespace clang { template<> void ASTDeclReader::attachPreviousDeclImpl(ASTReader &Reader, Redeclarable<FunctionDecl> *D, - Decl *Previous) { + Decl *Previous, Decl *Canon) { FunctionDecl *FD = static_cast<FunctionDecl*>(D); FunctionDecl *PrevFD = cast<FunctionDecl>(Previous); FD->RedeclLink.setPrevious(PrevFD); + FD->First = PrevFD->First; // If the previous declaration is an inline function declaration, then this // declaration is too. @@ -2774,16 +2886,17 @@ void ASTDeclReader::attachPreviousDeclImpl(ASTReader &Reader, FD->IsInline = true; } - // If this declaration has an unresolved exception specification but the - // previous declaration had a resolved one, resolve the exception - // specification now. + // If we need to propagate an exception specification along the redecl + // chain, make a note of that so that we can do so later. 
auto *FPT = FD->getType()->getAs<FunctionProtoType>(); auto *PrevFPT = PrevFD->getType()->getAs<FunctionProtoType>(); - if (FPT && PrevFPT && - isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) && - !isUnresolvedExceptionSpec(PrevFPT->getExceptionSpecType())) { - Reader.Context.adjustExceptionSpec( - FD, PrevFPT->getExtProtoInfo().ExceptionSpec); + if (FPT && PrevFPT) { + bool IsUnresolved = isUnresolvedExceptionSpec(FPT->getExceptionSpecType()); + bool WasUnresolved = + isUnresolvedExceptionSpec(PrevFPT->getExceptionSpecType()); + if (IsUnresolved != WasUnresolved) + Reader.PendingExceptionSpecUpdates.insert( + std::make_pair(Canon, IsUnresolved ? PrevFD : FD)); } } } @@ -2792,14 +2905,14 @@ void ASTDeclReader::attachPreviousDeclImpl(ASTReader &Reader, ...) { } void ASTDeclReader::attachPreviousDecl(ASTReader &Reader, Decl *D, - Decl *Previous) { + Decl *Previous, Decl *Canon) { assert(D && Previous); switch (D->getKind()) { #define ABSTRACT_DECL(TYPE) -#define DECL(TYPE, BASE) \ - case Decl::TYPE: \ - attachPreviousDeclImpl(Reader, cast<TYPE##Decl>(D), Previous); \ +#define DECL(TYPE, BASE) \ + case Decl::TYPE: \ + attachPreviousDeclImpl(Reader, cast<TYPE##Decl>(D), Previous, Canon); \ break; #include "clang/AST/DeclNodes.inc" } @@ -2859,29 +2972,6 @@ void ASTReader::markIncompleteDeclChain(Decl *D) { } } -ASTReader::MergedDeclsMap::iterator -ASTReader::combineStoredMergedDecls(Decl *Canon, GlobalDeclID CanonID) { - // If we don't have any stored merged declarations, just look in the - // merged declarations set. - StoredMergedDeclsMap::iterator StoredPos = StoredMergedDecls.find(CanonID); - if (StoredPos == StoredMergedDecls.end()) - return MergedDecls.find(Canon); - - // Append the stored merged declarations to the merged declarations set. - MergedDeclsMap::iterator Pos = MergedDecls.find(Canon); - if (Pos == MergedDecls.end()) - Pos = MergedDecls.insert(std::make_pair(Canon, - SmallVector<DeclID, 2>())).first; - Pos->second.append(StoredPos->second.begin(), StoredPos->second.end()); - StoredMergedDecls.erase(StoredPos); - - // Sort and uniquify the set of merged declarations. - llvm::array_pod_sort(Pos->second.begin(), Pos->second.end()); - Pos->second.erase(std::unique(Pos->second.begin(), Pos->second.end()), - Pos->second.end()); - return Pos; -} - /// \brief Read the declaration at the given offset from the AST file. Decl *ASTReader::ReadDeclRecord(DeclID ID) { unsigned Index = ID - NUM_PREDEF_DECL_IDS; @@ -3086,6 +3176,9 @@ Decl *ASTReader::ReadDeclRecord(DeclID ID) { case DECL_CXX_BASE_SPECIFIERS: Error("attempt to read a C++ base-specifier record as a declaration"); return nullptr; + case DECL_CXX_CTOR_INITIALIZERS: + Error("attempt to read a C++ ctor initializer record as a declaration"); + return nullptr; case DECL_IMPORT: // Note: last entry of the ImportDecl record is the number of stored source // locations. @@ -3199,47 +3292,55 @@ void ASTReader::loadDeclUpdateRecords(serialization::DeclID ID, Decl *D) { } namespace { - /// \brief Module visitor class that finds all of the redeclarations of a - /// + /// \brief Module visitor class that finds all of the redeclarations of a + /// redeclarable declaration. 
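getMostRecentDeclImpl above uses a small overload-resolution trick: the macro-generated switch casts to the concrete decl type, a function template binds to anything deriving from Redeclarable (deduction looks through the base class), and a C-style variadic overload, which ranks last in overload resolution, catches every non-redeclarable kind. A standalone sketch with hypothetical types (the real catch-all is llvm_unreachable, not a null return):

#include <cstdio>

template <typename T> struct Redeclarable {
  T *Latest = nullptr;
};

// The template wins for any Redeclarable-derived argument; the variadic
// overload is the worst possible match and so fires only when template
// deduction fails, i.e. for types that are not redeclarable at all.
template <typename T> static T *getLatestImpl(Redeclarable<T> *D) {
  return D->Latest;
}
static void *getLatestImpl(...) { return nullptr; }

struct Fn : Redeclarable<Fn> {};
struct Label {}; // not redeclarable

int main() {
  Fn F;
  F.Latest = &F;
  Label L;
  std::printf("%p %p\n", (void *)getLatestImpl(&F), getLatestImpl(&L));
}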
class RedeclChainVisitor { ASTReader &Reader; SmallVectorImpl<DeclID> &SearchDecls; llvm::SmallPtrSetImpl<Decl *> &Deserialized; GlobalDeclID CanonID; SmallVector<Decl *, 4> Chain; - + public: RedeclChainVisitor(ASTReader &Reader, SmallVectorImpl<DeclID> &SearchDecls, llvm::SmallPtrSetImpl<Decl *> &Deserialized, GlobalDeclID CanonID) : Reader(Reader), SearchDecls(SearchDecls), Deserialized(Deserialized), - CanonID(CanonID) { - for (unsigned I = 0, N = SearchDecls.size(); I != N; ++I) - addToChain(Reader.GetDecl(SearchDecls[I])); + CanonID(CanonID) { + // Ensure that the canonical ID goes at the start of the chain. + addToChain(Reader.GetDecl(CanonID)); } - - static bool visit(ModuleFile &M, bool Preorder, void *UserData) { - if (Preorder) - return false; - - return static_cast<RedeclChainVisitor *>(UserData)->visit(M); + + static ModuleManager::DFSPreorderControl + visitPreorder(ModuleFile &M, void *UserData) { + return static_cast<RedeclChainVisitor *>(UserData)->visitPreorder(M); } - + + static bool visitPostorder(ModuleFile &M, void *UserData) { + return static_cast<RedeclChainVisitor *>(UserData)->visitPostorder(M); + } + void addToChain(Decl *D) { if (!D) return; - + if (Deserialized.erase(D)) Chain.push_back(D); } - + void searchForID(ModuleFile &M, GlobalDeclID GlobalID) { // Map global ID of the first declaration down to the local ID // used in this module file. DeclID ID = Reader.mapGlobalIDToModuleFileGlobalID(M, GlobalID); if (!ID) return; - + + // If the search decl was from this module, add it to the chain before any + // of its redeclarations in this module or users of it, and after any from + // imported modules. + if (CanonID != GlobalID && Reader.isDeclIDFromModule(GlobalID, M)) + addToChain(Reader.GetDecl(GlobalID)); + // Perform a binary search to find the local redeclarations for this // declaration (if any). const LocalRedeclarationsInfo Compare = { ID, 0 }; @@ -3269,8 +3370,36 @@ namespace { for (unsigned I = 0; I != N; ++I) addToChain(Reader.GetLocalDecl(M, M.RedeclarationChains[Offset++])); } - - bool visit(ModuleFile &M) { + + bool needsToVisitImports(ModuleFile &M, GlobalDeclID GlobalID) { + DeclID ID = Reader.mapGlobalIDToModuleFileGlobalID(M, GlobalID); + if (!ID) + return false; + + const LocalRedeclarationsInfo Compare = {ID, 0}; + const LocalRedeclarationsInfo *Result = std::lower_bound( + M.RedeclarationsMap, + M.RedeclarationsMap + M.LocalNumRedeclarationsInMap, Compare); + if (Result == M.RedeclarationsMap + M.LocalNumRedeclarationsInMap || + Result->FirstID != ID) { + return true; + } + unsigned Offset = Result->Offset; + unsigned N = M.RedeclarationChains[Offset]; + // We don't need to visit a module or any of its imports if we've already + // deserialized the redecls from this module. + return N != 0; + } + + ModuleManager::DFSPreorderControl visitPreorder(ModuleFile &M) { + for (unsigned I = 0, N = SearchDecls.size(); I != N; ++I) { + if (needsToVisitImports(M, SearchDecls[I])) + return ModuleManager::Continue; + } + return ModuleManager::SkipImports; + } + + bool visitPostorder(ModuleFile &M) { // Visit each of the declarations. for (unsigned I = 0, N = SearchDecls.size(); I != N; ++I) searchForID(M, SearchDecls[I]); @@ -3285,41 +3414,46 @@ namespace { }; } -void ASTReader::loadPendingDeclChain(serialization::GlobalDeclID ID) { - Decl *D = GetDecl(ID); - Decl *CanonDecl = D->getCanonicalDecl(); - +void ASTReader::loadPendingDeclChain(Decl *CanonDecl) { + // The decl might have been merged into something else after being added to + // our list. 
If it was, just skip it. + if (!CanonDecl->isCanonicalDecl()) + return; + // Determine the set of declaration IDs we'll be searching for. - SmallVector<DeclID, 1> SearchDecls; - GlobalDeclID CanonID = 0; - if (D == CanonDecl) { - SearchDecls.push_back(ID); // Always first. - CanonID = ID; - } - MergedDeclsMap::iterator MergedPos = combineStoredMergedDecls(CanonDecl, ID); + SmallVector<DeclID, 16> SearchDecls; + GlobalDeclID CanonID = CanonDecl->getGlobalID(); + if (CanonID) + SearchDecls.push_back(CanonDecl->getGlobalID()); // Always first. + MergedDeclsMap::iterator MergedPos = MergedDecls.find(CanonDecl); if (MergedPos != MergedDecls.end()) SearchDecls.append(MergedPos->second.begin(), MergedPos->second.end()); - + // Build up the list of redeclarations. RedeclChainVisitor Visitor(*this, SearchDecls, RedeclsDeserialized, CanonID); - ModuleMgr.visitDepthFirst(&RedeclChainVisitor::visit, &Visitor); - + ModuleMgr.visitDepthFirst(&RedeclChainVisitor::visitPreorder, + &RedeclChainVisitor::visitPostorder, &Visitor); + // Retrieve the chains. ArrayRef<Decl *> Chain = Visitor.getChain(); - if (Chain.empty()) + if (Chain.empty() || (Chain.size() == 1 && Chain[0] == CanonDecl)) return; - + // Hook up the chains. - Decl *MostRecent = CanonDecl->getMostRecentDecl(); + // + // FIXME: We have three different dispatches on decl kind here; maybe + // we should instead generate one loop per kind and dispatch up-front? + Decl *MostRecent = ASTDeclReader::getMostRecentDecl(CanonDecl); + if (!MostRecent) + MostRecent = CanonDecl; for (unsigned I = 0, N = Chain.size(); I != N; ++I) { if (Chain[I] == CanonDecl) continue; - ASTDeclReader::attachPreviousDecl(*this, Chain[I], MostRecent); + ASTDeclReader::attachPreviousDecl(*this, Chain[I], MostRecent, CanonDecl); MostRecent = Chain[I]; } - - ASTDeclReader::attachLatestDecl(CanonDecl, MostRecent); + ASTDeclReader::attachLatestDecl(CanonDecl, MostRecent); } namespace { @@ -3513,13 +3647,25 @@ void ASTDeclReader::UpdateDecl(Decl *D, ModuleFile &ModuleFile, while (Idx < Record.size()) { switch ((DeclUpdateKind)Record[Idx++]) { case UPD_CXX_ADDED_IMPLICIT_MEMBER: { + auto *RD = cast<CXXRecordDecl>(D); // FIXME: If we also have an update record for instantiating the // definition of D, we need that to happen before we get here. Decl *MD = Reader.ReadDecl(ModuleFile, Record, Idx); assert(MD && "couldn't read decl from update record"); // FIXME: We should call addHiddenDecl instead, to add the member // to its DeclContext. - cast<CXXRecordDecl>(D)->addedMember(MD); + RD->addedMember(MD); + + // If we've added a new special member to a class definition that is not + // the canonical definition, then we need special member lookups in the + // canonical definition to also look into our class. + auto *DD = RD->DefinitionData.getNotUpdated(); + if (DD && DD->Definition != RD) { + auto &Merged = Reader.MergedLookups[DD->Definition]; + // FIXME: Avoid the linear-time scan here. + if (std::find(Merged.begin(), Merged.end(), RD) == Merged.end()) + Merged.push_back(RD); + } break; } @@ -3567,13 +3713,12 @@ void ASTDeclReader::UpdateDecl(Decl *D, ModuleFile &ModuleFile, }); } FD->setInnerLocStart(Reader.ReadSourceLocation(ModuleFile, Record, Idx)); - if (auto *CD = dyn_cast<CXXConstructorDecl>(FD)) - std::tie(CD->CtorInitializers, CD->NumCtorInitializers) = - Reader.ReadCXXCtorInitializers(ModuleFile, Record, Idx); - if (auto *DD = dyn_cast<CXXDestructorDecl>(FD)) - // FIXME: Check consistency. 
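The hook-up loop in loadPendingDeclChain above threads the deserialized declarations into one redeclaration chain: starting from the canonical declaration, each chain member's previous link is attached to the most recent declaration seen so far. A minimal sketch:

#include <cassert>
#include <vector>

struct Decl {
  Decl *Prev = nullptr;
};

// Given the canonical declaration and the remaining redeclarations in
// order, link each one back to the most recent declaration so far.
static void hookUpChain(Decl *Canon, const std::vector<Decl *> &Chain) {
  Decl *MostRecent = Canon;
  for (Decl *D : Chain) {
    if (D == Canon)
      continue;
    D->Prev = MostRecent;
    MostRecent = D;
  }
}

int main() {
  Decl A, B, C;
  hookUpChain(&A, {&B, &C});
  assert(B.Prev == &A && C.Prev == &B);
}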
- DD->setOperatorDelete(Reader.ReadDeclAs<FunctionDecl>(ModuleFile, - Record, Idx)); + if (auto *CD = dyn_cast<CXXConstructorDecl>(FD)) { + CD->NumCtorInitializers = Record[Idx++]; + if (CD->NumCtorInitializers) + CD->CtorInitializers = + Reader.ReadCXXCtorInitializersRef(F, Record, Idx); + } // Store the offset of the body so we can lazily load it later. Reader.PendingBodies[FD] = GetCurrentCursorOffset(); HasPendingBody = true; @@ -3583,16 +3728,20 @@ void ASTDeclReader::UpdateDecl(Decl *D, ModuleFile &ModuleFile, case UPD_CXX_INSTANTIATED_CLASS_DEFINITION: { auto *RD = cast<CXXRecordDecl>(D); - bool HadDefinition = RD->getDefinition(); - ReadCXXRecordDefinition(RD); + auto *OldDD = RD->DefinitionData.getNotUpdated(); + bool HadRealDefinition = + OldDD && (OldDD->Definition != RD || + !Reader.PendingFakeDefinitionData.count(OldDD)); + ReadCXXRecordDefinition(RD, /*Update*/true); + // Visible update is handled separately. uint64_t LexicalOffset = Record[Idx++]; - if (!HadDefinition && LexicalOffset) { + if (!HadRealDefinition && LexicalOffset) { RD->setHasExternalLexicalStorage(true); Reader.ReadDeclContextStorage(ModuleFile, ModuleFile.DeclsCursor, - std::make_pair(LexicalOffset, 0), - ModuleFile.DeclContextInfos[RD]); - Reader.PendingDefinitions.insert(RD); + std::make_pair(LexicalOffset, 0), + ModuleFile.DeclContextInfos[RD]); + Reader.PendingFakeDefinitionData.erase(OldDD); } auto TSK = (TemplateSpecializationKind)Record[Idx++]; @@ -3636,24 +3785,36 @@ void ASTDeclReader::UpdateDecl(Decl *D, ModuleFile &ModuleFile, break; } + case UPD_CXX_RESOLVED_DTOR_DELETE: { + // Set the 'operator delete' directly to avoid emitting another update + // record. + auto *Del = Reader.ReadDeclAs<FunctionDecl>(ModuleFile, Record, Idx); + auto *First = cast<CXXDestructorDecl>(D->getCanonicalDecl()); + // FIXME: Check consistency if we have an old and new operator delete. + if (!First->OperatorDelete) + First->OperatorDelete = Del; + break; + } + case UPD_CXX_RESOLVED_EXCEPTION_SPEC: { - // FIXME: This doesn't send the right notifications if there are - // ASTMutationListeners other than an ASTWriter. FunctionProtoType::ExceptionSpecInfo ESI; SmallVector<QualType, 8> ExceptionStorage; Reader.readExceptionSpec(ModuleFile, ExceptionStorage, ESI, Record, Idx); - for (auto *Redecl : merged_redecls(D)) { - auto *FD = cast<FunctionDecl>(Redecl); - auto *FPT = FD->getType()->castAs<FunctionProtoType>(); - if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType())) { - // AST invariant: if any exception spec in the redecl chain is - // resolved, all are resolved. We don't need to go any further. - // FIXME: If the exception spec is resolved, check that it matches. - break; - } + + // Update this declaration's exception specification, if needed. + auto *FD = cast<FunctionDecl>(D); + auto *FPT = FD->getType()->castAs<FunctionProtoType>(); + // FIXME: If the exception specification is already present, check that it + // matches. + if (isUnresolvedExceptionSpec(FPT->getExceptionSpecType())) { FD->setType(Reader.Context.getFunctionType( FPT->getReturnType(), FPT->getParamTypes(), FPT->getExtProtoInfo().withExceptionSpec(ESI))); + + // When we get to the end of deserializing, see if there are other decls + // that we need to propagate this exception specification onto. 
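The UPD_CXX_RESOLVED_EXCEPTION_SPEC handler above updates only the declaration named by the record and defers the rest. A plausible reading of PendingExceptionSpecUpdates is a map from canonical declaration to one resolved representative, applied across whole chains once deserialization settles; the following defer-then-propagate sketch uses a hypothetical structure, not the clang data types:

#include <cassert>
#include <map>

struct Fn {
  Fn *Next = nullptr; // next redeclaration in the chain
  bool SpecResolved = false;
};

int main() {
  Fn A, B, C;
  A.Next = &B;
  B.Next = &C;
  C.SpecResolved = true; // one decl had its exception spec resolved

  std::map<Fn *, Fn *> Pending; // canonical decl -> resolved decl
  Pending[&A] = &C;

  // "End of deserialization": propagate along each recorded chain.
  for (auto &Entry : Pending)
    for (Fn *D = Entry.first; D; D = D->Next)
      D->SpecResolved = Entry.second->SpecResolved;

  assert(A.SpecResolved && B.SpecResolved);
}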
+ Reader.PendingExceptionSpecUpdates.insert( + std::make_pair(FD->getCanonicalDecl(), FD)); } break; } @@ -3685,10 +3846,30 @@ void ASTDeclReader::UpdateDecl(Decl *D, ModuleFile &ModuleFile, case UPD_STATIC_LOCAL_NUMBER: Reader.Context.setStaticLocalNumber(cast<VarDecl>(D), Record[Idx++]); break; + case UPD_DECL_MARKED_OPENMP_THREADPRIVATE: D->addAttr(OMPThreadPrivateDeclAttr::CreateImplicit( Reader.Context, ReadSourceRange(Record, Idx))); break; + + case UPD_DECL_EXPORTED: + unsigned SubmoduleID = readSubmoduleID(Record, Idx); + Module *Owner = SubmoduleID ? Reader.getSubmodule(SubmoduleID) : nullptr; + if (Reader.getContext().getLangOpts().ModulesLocalVisibility) { + // FIXME: This doesn't send the right notifications if there are + // ASTMutationListeners other than an ASTWriter. + Reader.getContext().mergeDefinitionIntoModule(cast<NamedDecl>(D), Owner, + /*NotifyListeners*/false); + Reader.PendingMergedDefinitionsToDeduplicate.insert(cast<NamedDecl>(D)); + } else if (Owner && Owner->NameVisibility != Module::AllVisible) { + // If Owner is made visible at some later point, make this declaration + // visible too. + Reader.HiddenNamesMap[Owner].push_back(D); + } else { + // The declaration is now visible. + D->Hidden = false; + } + break; } } } diff --git a/contrib/llvm/tools/clang/lib/Serialization/ASTReaderStmt.cpp b/contrib/llvm/tools/clang/lib/Serialization/ASTReaderStmt.cpp index 4ef2e73..d1ecd46 100644 --- a/contrib/llvm/tools/clang/lib/Serialization/ASTReaderStmt.cpp +++ b/contrib/llvm/tools/clang/lib/Serialization/ASTReaderStmt.cpp @@ -486,7 +486,7 @@ void ASTStmtReader::VisitStringLiteral(StringLiteral *E) { // Read string data SmallString<16> Str(&Record[Idx], &Record[Idx] + Len); - E->setString(Reader.getContext(), Str.str(), kind, isPascal); + E->setString(Reader.getContext(), Str, kind, isPascal); Idx += Len; // Read source locations @@ -1826,6 +1826,7 @@ void OMPClauseReader::VisitOMPScheduleClause(OMPScheduleClause *C) { C->setScheduleKind( static_cast<OpenMPScheduleClauseKind>(Record[Idx++])); C->setChunkSize(Reader->Reader.ReadSubExpr()); + C->setHelperChunkSize(Reader->Reader.ReadSubExpr()); C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx)); C->setScheduleKindLoc(Reader->ReadSourceLocation(Record, Idx)); C->setCommaLoc(Reader->ReadSourceLocation(Record, Idx)); @@ -1889,6 +1890,22 @@ void OMPClauseReader::VisitOMPLastprivateClause(OMPLastprivateClause *C) { for (unsigned i = 0; i != NumVars; ++i) Vars.push_back(Reader->Reader.ReadSubExpr()); C->setVarRefs(Vars); + Vars.clear(); + for (unsigned i = 0; i != NumVars; ++i) + Vars.push_back(Reader->Reader.ReadSubExpr()); + C->setPrivateCopies(Vars); + Vars.clear(); + for (unsigned i = 0; i != NumVars; ++i) + Vars.push_back(Reader->Reader.ReadSubExpr()); + C->setSourceExprs(Vars); + Vars.clear(); + for (unsigned i = 0; i != NumVars; ++i) + Vars.push_back(Reader->Reader.ReadSubExpr()); + C->setDestinationExprs(Vars); + Vars.clear(); + for (unsigned i = 0; i != NumVars; ++i) + Vars.push_back(Reader->Reader.ReadSubExpr()); + C->setAssignmentOps(Vars); } void OMPClauseReader::VisitOMPSharedClause(OMPSharedClause *C) { @@ -1917,6 +1934,18 @@ void OMPClauseReader::VisitOMPReductionClause(OMPReductionClause *C) { for (unsigned i = 0; i != NumVars; ++i) Vars.push_back(Reader->Reader.ReadSubExpr()); C->setVarRefs(Vars); + Vars.clear(); + for (unsigned i = 0; i != NumVars; ++i) + Vars.push_back(Reader->Reader.ReadSubExpr()); + C->setLHSExprs(Vars); + Vars.clear(); + for (unsigned i = 0; i != NumVars; ++i) + 
Vars.push_back(Reader->Reader.ReadSubExpr()); + C->setRHSExprs(Vars); + Vars.clear(); + for (unsigned i = 0; i != NumVars; ++i) + Vars.push_back(Reader->Reader.ReadSubExpr()); + C->setReductionOps(Vars); } void OMPClauseReader::VisitOMPLinearClause(OMPLinearClause *C) { @@ -1928,7 +1957,20 @@ void OMPClauseReader::VisitOMPLinearClause(OMPLinearClause *C) { for (unsigned i = 0; i != NumVars; ++i) Vars.push_back(Reader->Reader.ReadSubExpr()); C->setVarRefs(Vars); + Vars.clear(); + for (unsigned i = 0; i != NumVars; ++i) + Vars.push_back(Reader->Reader.ReadSubExpr()); + C->setInits(Vars); + Vars.clear(); + for (unsigned i = 0; i != NumVars; ++i) + Vars.push_back(Reader->Reader.ReadSubExpr()); + C->setUpdates(Vars); + Vars.clear(); + for (unsigned i = 0; i != NumVars; ++i) + Vars.push_back(Reader->Reader.ReadSubExpr()); + C->setFinals(Vars); C->setStep(Reader->Reader.ReadSubExpr()); + C->setCalcStep(Reader->Reader.ReadSubExpr()); } void OMPClauseReader::VisitOMPAlignedClause(OMPAlignedClause *C) { @@ -1946,21 +1988,45 @@ void OMPClauseReader::VisitOMPAlignedClause(OMPAlignedClause *C) { void OMPClauseReader::VisitOMPCopyinClause(OMPCopyinClause *C) { C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx)); unsigned NumVars = C->varlist_size(); - SmallVector<Expr *, 16> Vars; - Vars.reserve(NumVars); + SmallVector<Expr *, 16> Exprs; + Exprs.reserve(NumVars); for (unsigned i = 0; i != NumVars; ++i) - Vars.push_back(Reader->Reader.ReadSubExpr()); - C->setVarRefs(Vars); + Exprs.push_back(Reader->Reader.ReadSubExpr()); + C->setVarRefs(Exprs); + Exprs.clear(); + for (unsigned i = 0; i != NumVars; ++i) + Exprs.push_back(Reader->Reader.ReadSubExpr()); + C->setSourceExprs(Exprs); + Exprs.clear(); + for (unsigned i = 0; i != NumVars; ++i) + Exprs.push_back(Reader->Reader.ReadSubExpr()); + C->setDestinationExprs(Exprs); + Exprs.clear(); + for (unsigned i = 0; i != NumVars; ++i) + Exprs.push_back(Reader->Reader.ReadSubExpr()); + C->setAssignmentOps(Exprs); } void OMPClauseReader::VisitOMPCopyprivateClause(OMPCopyprivateClause *C) { C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx)); unsigned NumVars = C->varlist_size(); - SmallVector<Expr *, 16> Vars; - Vars.reserve(NumVars); + SmallVector<Expr *, 16> Exprs; + Exprs.reserve(NumVars); for (unsigned i = 0; i != NumVars; ++i) - Vars.push_back(Reader->Reader.ReadSubExpr()); - C->setVarRefs(Vars); + Exprs.push_back(Reader->Reader.ReadSubExpr()); + C->setVarRefs(Exprs); + Exprs.clear(); + for (unsigned i = 0; i != NumVars; ++i) + Exprs.push_back(Reader->Reader.ReadSubExpr()); + C->setSourceExprs(Exprs); + Exprs.clear(); + for (unsigned i = 0; i != NumVars; ++i) + Exprs.push_back(Reader->Reader.ReadSubExpr()); + C->setDestinationExprs(Exprs); + Exprs.clear(); + for (unsigned i = 0; i != NumVars; ++i) + Exprs.push_back(Reader->Reader.ReadSubExpr()); + C->setAssignmentOps(Exprs); } void OMPClauseReader::VisitOMPFlushClause(OMPFlushClause *C) { @@ -2135,6 +2201,9 @@ void ASTStmtReader::VisitOMPAtomicDirective(OMPAtomicDirective *D) { D->setX(Reader.ReadSubExpr()); D->setV(Reader.ReadSubExpr()); D->setExpr(Reader.ReadSubExpr()); + D->setUpdateExpr(Reader.ReadSubExpr()); + D->IsXLHSInRHSPart = Record[Idx++] != 0; + D->IsPostfixUpdate = Record[Idx++] != 0; } void ASTStmtReader::VisitOMPTargetDirective(OMPTargetDirective *D) { @@ -2423,11 +2492,12 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) { SourceLocation MemberLoc = ReadSourceLocation(F, Record, Idx); DeclarationNameInfo MemberNameInfo(MemberD->getDeclName(), MemberLoc); bool IsArrow = 
Record[Idx++]; + SourceLocation OperatorLoc = ReadSourceLocation(F, Record, Idx); - S = MemberExpr::Create(Context, Base, IsArrow, QualifierLoc, + S = MemberExpr::Create(Context, Base, IsArrow, OperatorLoc, QualifierLoc, TemplateKWLoc, MemberD, FoundDecl, MemberNameInfo, - HasTemplateKWAndArgsInfo ? &ArgInfo : nullptr, - T, VK, OK); + HasTemplateKWAndArgsInfo ? &ArgInfo : nullptr, T, + VK, OK); ReadDeclarationNameLoc(F, cast<MemberExpr>(S)->MemberDNLoc, MemberD->getDeclName(), Record, Idx); if (HadMultipleCandidates) diff --git a/contrib/llvm/tools/clang/lib/Serialization/ASTWriter.cpp b/contrib/llvm/tools/clang/lib/Serialization/ASTWriter.cpp index 6c60d45..bf74c84 100644 --- a/contrib/llvm/tools/clang/lib/Serialization/ASTWriter.cpp +++ b/contrib/llvm/tools/clang/lib/Serialization/ASTWriter.cpp @@ -60,14 +60,14 @@ using namespace clang; using namespace clang::serialization; template <typename T, typename Allocator> -static StringRef data(const std::vector<T, Allocator> &v) { +static StringRef bytes(const std::vector<T, Allocator> &v) { if (v.empty()) return StringRef(); return StringRef(reinterpret_cast<const char*>(&v[0]), sizeof(T) * v.size()); } template <typename T> -static StringRef data(const SmallVectorImpl<T> &v) { +static StringRef bytes(const SmallVectorImpl<T> &v) { return StringRef(reinterpret_cast<const char*>(v.data()), sizeof(T) * v.size()); } @@ -867,6 +867,7 @@ void ASTWriter::WriteBlockInfoBlock() { RECORD(MODULE_NAME); RECORD(MODULE_MAP_FILE); RECORD(IMPORTS); + RECORD(KNOWN_MODULE_FILES); RECORD(LANGUAGE_OPTIONS); RECORD(TARGET_OPTIONS); RECORD(ORIGINAL_FILE); @@ -892,7 +893,6 @@ void ASTWriter::WriteBlockInfoBlock() { RECORD(STATISTICS); RECORD(TENTATIVE_DEFINITIONS); RECORD(UNUSED_FILESCOPED_DECLS); - RECORD(LOCALLY_SCOPED_EXTERN_C_DECLS); RECORD(SELECTOR_OFFSETS); RECORD(METHOD_POOL); RECORD(PP_COUNTER_VALUE); @@ -924,11 +924,9 @@ void ASTWriter::WriteBlockInfoBlock() { RECORD(OBJC_CATEGORIES_MAP); RECORD(FILE_SORTED_DECLS); RECORD(IMPORTED_MODULES); - RECORD(MERGED_DECLARATIONS); RECORD(LOCAL_REDECLARATIONS); RECORD(OBJC_CATEGORIES); RECORD(MACRO_OFFSET); - RECORD(MACRO_TABLE); RECORD(LATE_PARSED_TEMPLATE); RECORD(OPTIMIZE_PRAGMA_OPTIONS); @@ -941,10 +939,12 @@ void ASTWriter::WriteBlockInfoBlock() { // Preprocessor Block. BLOCK(PREPROCESSOR_BLOCK); - RECORD(PP_MACRO_OBJECT_LIKE); + RECORD(PP_MACRO_DIRECTIVE_HISTORY); RECORD(PP_MACRO_FUNCTION_LIKE); + RECORD(PP_MACRO_OBJECT_LIKE); + RECORD(PP_MODULE_MACRO); RECORD(PP_TOKEN); - + // Decls and Types block. BLOCK(DECLTYPES_BLOCK); RECORD(TYPE_EXT_QUAL); @@ -1062,7 +1062,8 @@ void ASTWriter::WriteBlockInfoBlock() { /// to an absolute path and removing nested './'s. /// /// \return \c true if the path was changed. -bool cleanPathForOutput(FileManager &FileMgr, SmallVectorImpl<char> &Path) { +static bool cleanPathForOutput(FileManager &FileMgr, + SmallVectorImpl<char> &Path) { bool Changed = false; if (!llvm::sys::path::is_absolute(StringRef(Path.data(), Path.size()))) { @@ -1159,12 +1160,17 @@ void ASTWriter::WriteControlBlock(Preprocessor &PP, ASTContext &Context, Stream.EmitRecordWithBlob(MetadataAbbrevCode, Record, getClangFullRepositoryVersion()); - // Signature - Record.clear(); - Record.push_back(getSignature()); - Stream.EmitRecord(SIGNATURE, Record); - if (WritingModule) { + // For implicit modules we output a signature that we can use to ensure + // duplicate module builds don't collide in the cache as their output order + // is non-deterministic. + // FIXME: Remove this when output is deterministic. 
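The data()-to-bytes() rename above makes the intent explicit: these helpers reinterpret a vector of fixed-width integers as one raw byte blob for EmitRecordWithBlob, which is also why widening InputFileOffsets to uint64_t changes the emitted blob size. A standalone sketch, assuming host endianness is acceptable (the real writer pins that down elsewhere):

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

struct ByteSpan {
  const char *Data;
  std::size_t Size;
};

// View the vector's storage as raw bytes so the whole table can be
// written in one shot; no copy is made.
template <typename T> static ByteSpan bytes(const std::vector<T> &V) {
  if (V.empty())
    return {nullptr, 0};
  return {reinterpret_cast<const char *>(V.data()), sizeof(T) * V.size()};
}

int main() {
  std::vector<uint64_t> Offsets = {1, 2, 3};
  std::printf("%zu bytes\n", bytes(Offsets).Size); // 24
}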
+ if (Context.getLangOpts().ImplicitModules) { + Record.clear(); + Record.push_back(getSignature()); + Stream.EmitRecord(SIGNATURE, Record); + } + // Module name BitCodeAbbrev *Abbrev = new BitCodeAbbrev(); Abbrev->Add(BitCodeAbbrevOp(MODULE_NAME)); @@ -1222,20 +1228,28 @@ void ASTWriter::WriteControlBlock(Preprocessor &PP, ASTContext &Context, serialization::ModuleManager &Mgr = Chain->getModuleManager(); Record.clear(); - for (ModuleManager::ModuleIterator M = Mgr.begin(), MEnd = Mgr.end(); - M != MEnd; ++M) { + for (auto *M : Mgr) { // Skip modules that weren't directly imported. - if (!(*M)->isDirectlyImported()) + if (!M->isDirectlyImported()) continue; - Record.push_back((unsigned)(*M)->Kind); // FIXME: Stable encoding - AddSourceLocation((*M)->ImportLoc, Record); - Record.push_back((*M)->File->getSize()); - Record.push_back((*M)->File->getModificationTime()); - Record.push_back((*M)->Signature); - AddPath((*M)->FileName, Record); + Record.push_back((unsigned)M->Kind); // FIXME: Stable encoding + AddSourceLocation(M->ImportLoc, Record); + Record.push_back(M->File->getSize()); + Record.push_back(M->File->getModificationTime()); + Record.push_back(M->Signature); + AddPath(M->FileName, Record); } Stream.EmitRecord(IMPORTS, Record); + + // Also emit a list of known module files that were not imported, + // but are made available by this module. + // FIXME: Should we also include a signature here? + Record.clear(); + for (auto *E : Mgr.getAdditionalKnownModuleFiles()) + AddPath(E->getName(), Record); + if (!Record.empty()) + Stream.EmitRecord(KNOWN_MODULE_FILES, Record); } // Language options. @@ -1341,6 +1355,8 @@ void ASTWriter::WriteControlBlock(Preprocessor &PP, ASTContext &Context, Record.push_back(HSOpts.UseStandardSystemIncludes); Record.push_back(HSOpts.UseStandardCXXIncludes); Record.push_back(HSOpts.UseLibcxx); + // Write out the specific module cache path that contains the module files. + AddString(PP.getHeaderSearchInfo().getModuleCachePath(), Record); Stream.EmitRecord(HEADER_SEARCH_OPTIONS, Record); // Preprocessor options. @@ -1467,7 +1483,7 @@ void ASTWriter::WriteInputFiles(SourceManager &SourceMgr, unsigned UserFilesNum = 0; // Write out all of the input files. - std::vector<uint32_t> InputFileOffsets; + std::vector<uint64_t> InputFileOffsets; for (std::deque<InputFileEntry>::iterator I = SortedFiles.begin(), E = SortedFiles.end(); I != E; ++I) { const InputFileEntry &Entry = *I; @@ -1514,7 +1530,7 @@ void ASTWriter::WriteInputFiles(SourceManager &SourceMgr, Record.push_back(INPUT_FILE_OFFSETS); Record.push_back(InputFileOffsets.size()); Record.push_back(UserFilesNum); - Stream.EmitRecordWithBlob(OffsetsAbbrevCode, Record, data(InputFileOffsets)); + Stream.EmitRecordWithBlob(OffsetsAbbrevCode, Record, bytes(InputFileOffsets)); } //===----------------------------------------------------------------------===// @@ -1756,7 +1772,7 @@ void ASTWriter::WriteHeaderSearch(const HeaderSearch &HS) { Record.push_back(NumHeaderSearchEntries); Record.push_back(TableData.size()); TableData.append(GeneratorTrait.strings_begin(),GeneratorTrait.strings_end()); - Stream.EmitRecordWithBlob(TableAbbrev, Record, TableData.str()); + Stream.EmitRecordWithBlob(TableAbbrev, Record, TableData); // Free all of the strings we had to duplicate. 
for (unsigned I = 0, N = SavedStrings.size(); I != N; ++I) @@ -1909,7 +1925,7 @@ void ASTWriter::WriteSourceManagerBlock(SourceManager &SourceMgr, Record.push_back(SOURCE_LOCATION_OFFSETS); Record.push_back(SLocEntryOffsets.size()); Record.push_back(SourceMgr.getNextLocalOffset() - 1); // skip dummy - Stream.EmitRecordWithBlob(SLocOffsetsAbbrev, Record, data(SLocEntryOffsets)); + Stream.EmitRecordWithBlob(SLocOffsetsAbbrev, Record, bytes(SLocEntryOffsets)); // Write the source location entry preloads array, telling the AST // reader which source locations entries it should load eagerly. @@ -1956,52 +1972,6 @@ void ASTWriter::WriteSourceManagerBlock(SourceManager &SourceMgr, // Preprocessor Serialization //===----------------------------------------------------------------------===// -namespace { -class ASTMacroTableTrait { -public: - typedef IdentID key_type; - typedef key_type key_type_ref; - - struct Data { - uint32_t MacroDirectivesOffset; - }; - - typedef Data data_type; - typedef const data_type &data_type_ref; - typedef unsigned hash_value_type; - typedef unsigned offset_type; - - static hash_value_type ComputeHash(IdentID IdID) { - return llvm::hash_value(IdID); - } - - std::pair<unsigned,unsigned> - static EmitKeyDataLength(raw_ostream& Out, - key_type_ref Key, data_type_ref Data) { - unsigned KeyLen = 4; // IdentID. - unsigned DataLen = 4; // MacroDirectivesOffset. - return std::make_pair(KeyLen, DataLen); - } - - static void EmitKey(raw_ostream& Out, key_type_ref Key, unsigned KeyLen) { - using namespace llvm::support; - endian::Writer<little>(Out).write<uint32_t>(Key); - } - - static void EmitData(raw_ostream& Out, key_type_ref Key, data_type_ref Data, - unsigned) { - using namespace llvm::support; - endian::Writer<little>(Out).write<uint32_t>(Data.MacroDirectivesOffset); - } -}; -} // end anonymous namespace - -static int compareMacroDirectives( - const std::pair<const IdentifierInfo *, MacroDirective *> *X, - const std::pair<const IdentifierInfo *, MacroDirective *> *Y) { - return X->first->getName().compare(Y->first->getName()); -} - static bool shouldIgnoreMacro(MacroDirective *MD, bool IsModule, const Preprocessor &PP) { if (MacroInfo *MI = MD->getMacroInfo()) @@ -2009,10 +1979,6 @@ static bool shouldIgnoreMacro(MacroDirective *MD, bool IsModule, return true; if (IsModule) { - // Re-export any imported directives. - if (MD->isImported()) - return false; - SourceLocation Loc = MD->getLocation(); if (Loc.isInvalid()) return true; @@ -2032,6 +1998,7 @@ void ASTWriter::WritePreprocessor(const Preprocessor &PP, bool IsModule) { WritePreprocessorDetail(*PPRec); RecordData Record; + RecordData ModuleMacroRecord; // If the preprocessor __COUNTER__ value has been bumped, remember it. if (PP.getCounterValue() != 0) { @@ -2052,75 +2019,75 @@ void ASTWriter::WritePreprocessor(const Preprocessor &PP, bool IsModule) { // Loop over all the macro directives that are live at the end of the file, // emitting each to the PP section. - // Construct the list of macro directives that need to be serialized. - SmallVector<std::pair<const IdentifierInfo *, MacroDirective *>, 2> - MacroDirectives; - for (Preprocessor::macro_iterator - I = PP.macro_begin(/*IncludeExternalMacros=*/false), - E = PP.macro_end(/*IncludeExternalMacros=*/false); - I != E; ++I) { - MacroDirectives.push_back(std::make_pair(I->first, I->second)); - } - + // Construct the list of identifiers with macro directives that need to be + // serialized. 
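The rewritten WritePreprocessor below walks the identifier table, whose iteration order is unspecified, so it first collects the identifiers with macro history into a vector and sorts them by name before assigning offsets; that is what keeps the emitted file byte-for-byte reproducible. The idea in miniature:

#include <algorithm>
#include <cstdio>
#include <string>
#include <unordered_set>
#include <vector>

int main() {
  // Hash-table iteration order is unspecified, so copy the entries out
  // and sort by name before serializing anything that depends on order.
  std::unordered_set<std::string> Table = {"NDEBUG", "MIN", "ASSERT"};
  std::vector<std::string> Macros(Table.begin(), Table.end());
  std::sort(Macros.begin(), Macros.end());
  for (const std::string &M : Macros)
    std::printf("%s\n", M.c_str()); // ASSERT, MIN, NDEBUG
}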
+ SmallVector<const IdentifierInfo *, 128> MacroIdentifiers; + for (auto &Id : PP.getIdentifierTable()) + if (Id.second->hadMacroDefinition() && + (!Id.second->isFromAST() || + Id.second->hasChangedSinceDeserialization())) + MacroIdentifiers.push_back(Id.second); // Sort the set of macro definitions that need to be serialized by the // name of the macro, to provide a stable ordering. - llvm::array_pod_sort(MacroDirectives.begin(), MacroDirectives.end(), - &compareMacroDirectives); - - llvm::OnDiskChainedHashTableGenerator<ASTMacroTableTrait> Generator; + std::sort(MacroIdentifiers.begin(), MacroIdentifiers.end(), + llvm::less_ptr<IdentifierInfo>()); // Emit the macro directives as a list and associate the offset with the // identifier they belong to. - for (unsigned I = 0, N = MacroDirectives.size(); I != N; ++I) { - const IdentifierInfo *Name = MacroDirectives[I].first; - uint64_t MacroDirectiveOffset = Stream.GetCurrentBitNo(); - MacroDirective *MD = MacroDirectives[I].second; - - // If the macro or identifier need no updates, don't write the macro history - // for this one. - // FIXME: Chain the macro history instead of re-writing it. - if (MD->isFromPCH() && - Name->isFromAST() && !Name->hasChangedSinceDeserialization()) - continue; + for (const IdentifierInfo *Name : MacroIdentifiers) { + MacroDirective *MD = PP.getLocalMacroDirectiveHistory(Name); + auto StartOffset = Stream.GetCurrentBitNo(); // Emit the macro directives in reverse source order. for (; MD; MD = MD->getPrevious()) { + // Once we hit an ignored macro, we're done: the rest of the chain + // will all be ignored macros. if (shouldIgnoreMacro(MD, IsModule, PP)) - continue; + break; AddSourceLocation(MD->getLocation(), Record); Record.push_back(MD->getKind()); if (auto *DefMD = dyn_cast<DefMacroDirective>(MD)) { - MacroID InfoID = getMacroRef(DefMD->getInfo(), Name); - Record.push_back(InfoID); - Record.push_back(DefMD->getOwningModuleID()); - Record.push_back(DefMD->isAmbiguous()); - } else if (auto *UndefMD = dyn_cast<UndefMacroDirective>(MD)) { - Record.push_back(UndefMD->getOwningModuleID()); - } else { - auto *VisMD = cast<VisibilityMacroDirective>(MD); + Record.push_back(getMacroRef(DefMD->getInfo(), Name)); + } else if (auto *VisMD = dyn_cast<VisibilityMacroDirective>(MD)) { Record.push_back(VisMD->isPublic()); } + } - if (MD->isImported()) { - auto Overrides = MD->getOverriddenModules(); - Record.push_back(Overrides.size()); - for (auto Override : Overrides) - Record.push_back(Override); + // Write out any exported module macros. + bool EmittedModuleMacros = false; + if (IsModule) { + auto Leafs = PP.getLeafModuleMacros(Name); + SmallVector<ModuleMacro*, 8> Worklist(Leafs.begin(), Leafs.end()); + llvm::DenseMap<ModuleMacro*, unsigned> Visits; + while (!Worklist.empty()) { + auto *Macro = Worklist.pop_back_val(); + + // Emit a record indicating this submodule exports this macro. + ModuleMacroRecord.push_back( + getSubmoduleID(Macro->getOwningModule())); + ModuleMacroRecord.push_back(getMacroRef(Macro->getMacroInfo(), Name)); + for (auto *M : Macro->overrides()) + ModuleMacroRecord.push_back(getSubmoduleID(M->getOwningModule())); + + Stream.EmitRecord(PP_MODULE_MACRO, ModuleMacroRecord); + ModuleMacroRecord.clear(); + + // Enqueue overridden macros once we've visited all their ancestors. 
+ for (auto *M : Macro->overrides()) + if (++Visits[M] == M->getNumOverridingMacros()) + Worklist.push_back(M); + + EmittedModuleMacros = true; } } - if (Record.empty()) + + if (Record.empty() && !EmittedModuleMacros) continue; + IdentMacroDirectivesOffsetMap[Name] = StartOffset; Stream.EmitRecord(PP_MACRO_DIRECTIVE_HISTORY, Record); Record.clear(); - - IdentMacroDirectivesOffsetMap[Name] = MacroDirectiveOffset; - - IdentID NameID = getIdentifierRef(Name); - ASTMacroTableTrait::Data data; - data.MacroDirectivesOffset = MacroDirectiveOffset; - Generator.insert(NameID, data); } /// \brief Offsets of each of the macros into the bitstream, indexed by @@ -2196,33 +2163,9 @@ void ASTWriter::WritePreprocessor(const Preprocessor &PP, bool IsModule) { Stream.ExitBlock(); - // Create the on-disk hash table in a buffer. - SmallString<4096> MacroTable; - uint32_t BucketOffset; - { - using namespace llvm::support; - llvm::raw_svector_ostream Out(MacroTable); - // Make sure that no bucket is at offset 0 - endian::Writer<little>(Out).write<uint32_t>(0); - BucketOffset = Generator.Emit(Out); - } - - // Write the macro table - using namespace llvm; - BitCodeAbbrev *Abbrev = new BitCodeAbbrev(); - Abbrev->Add(BitCodeAbbrevOp(MACRO_TABLE)); - Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); - Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); - unsigned MacroTableAbbrev = Stream.EmitAbbrev(Abbrev); - - Record.push_back(MACRO_TABLE); - Record.push_back(BucketOffset); - Stream.EmitRecordWithBlob(MacroTableAbbrev, Record, MacroTable.str()); - Record.clear(); - // Write the offsets table for macro IDs. using namespace llvm; - Abbrev = new BitCodeAbbrev(); + auto *Abbrev = new BitCodeAbbrev(); Abbrev->Add(BitCodeAbbrevOp(MACRO_OFFSET)); Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // # of macros Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // first ID @@ -2234,7 +2177,7 @@ void ASTWriter::WritePreprocessor(const Preprocessor &PP, bool IsModule) { Record.push_back(MacroOffsets.size()); Record.push_back(FirstMacroID - NUM_PREDEF_MACRO_IDS); Stream.EmitRecordWithBlob(MacroOffsetAbbrev, Record, - data(MacroOffsets)); + bytes(MacroOffsets)); } void ASTWriter::WritePreprocessorDetail(PreprocessingRecord &PPRec) { @@ -2274,13 +2217,13 @@ void ASTWriter::WritePreprocessorDetail(PreprocessingRecord &PPRec) { (void)++E, ++NumPreprocessingRecords, ++NextPreprocessorEntityID) { Record.clear(); - PreprocessedEntityOffsets.push_back(PPEntityOffset((*E)->getSourceRange(), - Stream.GetCurrentBitNo())); + PreprocessedEntityOffsets.push_back( + PPEntityOffset((*E)->getSourceRange(), Stream.GetCurrentBitNo())); - if (MacroDefinition *MD = dyn_cast<MacroDefinition>(*E)) { + if (MacroDefinitionRecord *MD = dyn_cast<MacroDefinitionRecord>(*E)) { // Record this macro definition's ID. MacroDefinitions[MD] = NextPreprocessorEntityID; - + AddIdentifierRef(MD->getName(), Record); Stream.EmitRecord(PPD_MACRO_DEFINITION, Record); continue; @@ -2332,7 +2275,7 @@ void ASTWriter::WritePreprocessorDetail(PreprocessingRecord &PPRec) { Record.push_back(PPD_ENTITIES_OFFSETS); Record.push_back(FirstPreprocessorEntityID - NUM_PREDEF_PP_ENTITY_IDS); Stream.EmitRecordWithBlob(PPEOffsetAbbrev, Record, - data(PreprocessedEntityOffsets)); + bytes(PreprocessedEntityOffsets)); } } @@ -2369,19 +2312,6 @@ static unsigned getNumberOfModules(Module *Mod) { } void ASTWriter::WriteSubmodules(Module *WritingModule) { - // Determine the dependencies of our module and each of it's submodules. 
- // FIXME: This feels like it belongs somewhere else, but there are no - // other consumers of this information. - SourceManager &SrcMgr = PP->getSourceManager(); - ModuleMap &ModMap = PP->getHeaderSearchInfo().getModuleMap(); - for (const auto *I : Context->local_imports()) { - if (Module *ImportedFrom - = ModMap.inferModuleFromLocation(FullSourceLoc(I->getLocation(), - SrcMgr))) { - ImportedFrom->Imports.push_back(I->getImportedModule()); - } - } - // Enter the submodule description block. Stream.EnterSubblock(SUBMODULE_BLOCK_ID, /*bits for abbreviations*/5); @@ -2509,16 +2439,16 @@ void ASTWriter::WriteSubmodules(Module *WritingModule) { } // Emit the umbrella header, if there is one. - if (const FileEntry *UmbrellaHeader = Mod->getUmbrellaHeader()) { + if (auto UmbrellaHeader = Mod->getUmbrellaHeader()) { Record.clear(); Record.push_back(SUBMODULE_UMBRELLA_HEADER); - Stream.EmitRecordWithBlob(UmbrellaAbbrev, Record, - UmbrellaHeader->getName()); - } else if (const DirectoryEntry *UmbrellaDir = Mod->getUmbrellaDir()) { + Stream.EmitRecordWithBlob(UmbrellaAbbrev, Record, + UmbrellaHeader.NameAsWritten); + } else if (auto UmbrellaDir = Mod->getUmbrellaDir()) { Record.clear(); Record.push_back(SUBMODULE_UMBRELLA_DIR); Stream.EmitRecordWithBlob(UmbrellaDirAbbrev, Record, - UmbrellaDir->getName()); + UmbrellaDir.NameAsWritten); } // Emit the headers. @@ -2566,8 +2496,7 @@ void ASTWriter::WriteSubmodules(Module *WritingModule) { Record.clear(); for (unsigned I = 0, N = Mod->Exports.size(); I != N; ++I) { if (Module *Exported = Mod->Exports[I].getPointer()) { - unsigned ExportedID = SubmoduleIDs[Exported]; - assert(ExportedID > 0 && "Unknown submodule ID?"); + unsigned ExportedID = getSubmoduleID(Exported); Record.push_back(ExportedID); } else { Record.push_back(0); } @@ -2618,9 +2547,14 @@ void ASTWriter::WriteSubmodules(Module *WritingModule) { } Stream.ExitBlock(); - - assert((NextSubmoduleID - FirstSubmoduleID == getNumberOfModules(WritingModule)) && "Wrong # of submodules"); + + // FIXME: This can easily happen if we have a reference to a submodule that + // did not result in us loading a module file for that submodule. For + // instance, a cross-top-level-module 'conflict' declaration will hit this. + assert((NextSubmoduleID - FirstSubmoduleID == + getNumberOfModules(WritingModule)) && + "Wrong # of submodules; found a reference to a non-local, " + "non-imported submodule?"); } serialization::SubmoduleID @@ -2684,6 +2618,29 @@ void ASTWriter::WritePragmaDiagnosticMappings(const DiagnosticsEngine &Diag, Stream.EmitRecord(DIAG_PRAGMA_MAPPINGS, Record); } +void ASTWriter::WriteCXXCtorInitializersOffsets() { + if (CXXCtorInitializersOffsets.empty()) + return; + + RecordData Record; + + // Create a blob abbreviation for the C++ ctor initializer offsets. + using namespace llvm; + + BitCodeAbbrev *Abbrev = new BitCodeAbbrev(); + Abbrev->Add(BitCodeAbbrevOp(CXX_CTOR_INITIALIZERS_OFFSETS)); + Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // size + Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); + unsigned CtorInitializersOffsetAbbrev = Stream.EmitAbbrev(Abbrev); + + // Write the ctor initializer offsets table.
+ Record.clear(); + Record.push_back(CXX_CTOR_INITIALIZERS_OFFSETS); + Record.push_back(CXXCtorInitializersOffsets.size()); + Stream.EmitRecordWithBlob(CtorInitializersOffsetAbbrev, Record, + bytes(CXXCtorInitializersOffsets)); +} + void ASTWriter::WriteCXXBaseSpecifiersOffsets() { if (CXXBaseSpecifiersOffsets.empty()) return; @@ -2704,7 +2661,7 @@ void ASTWriter::WriteCXXBaseSpecifiersOffsets() { Record.push_back(CXX_BASE_SPECIFIER_OFFSETS); Record.push_back(CXXBaseSpecifiersOffsets.size()); Stream.EmitRecordWithBlob(BaseSpecifierOffsetAbbrev, Record, - data(CXXBaseSpecifiersOffsets)); + bytes(CXXBaseSpecifiersOffsets)); } //===----------------------------------------------------------------------===// @@ -2780,7 +2737,7 @@ uint64_t ASTWriter::WriteDeclContextLexicalBlock(ASTContext &Context, Decls.push_back(std::make_pair(D->getKind(), GetDeclRef(D))); ++NumLexicalDeclContexts; - Stream.EmitRecordWithBlob(DeclContextLexicalAbbrev, Record, data(Decls)); + Stream.EmitRecordWithBlob(DeclContextLexicalAbbrev, Record, bytes(Decls)); return Offset; } @@ -2799,7 +2756,7 @@ void ASTWriter::WriteTypeDeclOffsets() { Record.push_back(TYPE_OFFSET); Record.push_back(TypeOffsets.size()); Record.push_back(FirstTypeID - NUM_PREDEF_TYPE_IDS); - Stream.EmitRecordWithBlob(TypeOffsetAbbrev, Record, data(TypeOffsets)); + Stream.EmitRecordWithBlob(TypeOffsetAbbrev, Record, bytes(TypeOffsets)); // Write the declaration offsets array Abbrev = new BitCodeAbbrev(); @@ -2812,22 +2769,25 @@ void ASTWriter::WriteTypeDeclOffsets() { Record.push_back(DECL_OFFSET); Record.push_back(DeclOffsets.size()); Record.push_back(FirstDeclID - NUM_PREDEF_DECL_IDS); - Stream.EmitRecordWithBlob(DeclOffsetAbbrev, Record, data(DeclOffsets)); + Stream.EmitRecordWithBlob(DeclOffsetAbbrev, Record, bytes(DeclOffsets)); } void ASTWriter::WriteFileDeclIDsMap() { using namespace llvm; RecordData Record; + SmallVector<std::pair<FileID, DeclIDInFileInfo *>, 64> SortedFileDeclIDs( + FileDeclIDs.begin(), FileDeclIDs.end()); + std::sort(SortedFileDeclIDs.begin(), SortedFileDeclIDs.end(), + llvm::less_first()); + // Join the vectors of DeclIDs from all files. - SmallVector<DeclID, 256> FileSortedIDs; - for (FileDeclIDsTy::iterator - FI = FileDeclIDs.begin(), FE = FileDeclIDs.end(); FI != FE; ++FI) { - DeclIDInFileInfo &Info = *FI->second; - Info.FirstDeclIndex = FileSortedIDs.size(); - for (LocDeclIDsTy::iterator - DI = Info.DeclIDs.begin(), DE = Info.DeclIDs.end(); DI != DE; ++DI) - FileSortedIDs.push_back(DI->second); + SmallVector<DeclID, 256> FileGroupedDeclIDs; + for (auto &FileDeclEntry : SortedFileDeclIDs) { + DeclIDInFileInfo &Info = *FileDeclEntry.second; + Info.FirstDeclIndex = FileGroupedDeclIDs.size(); + for (auto &LocDeclEntry : Info.DeclIDs) + FileGroupedDeclIDs.push_back(LocDeclEntry.second); } BitCodeAbbrev *Abbrev = new BitCodeAbbrev(); @@ -2836,8 +2796,8 @@ void ASTWriter::WriteFileDeclIDsMap() { Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); unsigned AbbrevCode = Stream.EmitAbbrev(Abbrev); Record.push_back(FILE_SORTED_DECLS); - Record.push_back(FileSortedIDs.size()); - Stream.EmitRecordWithBlob(AbbrevCode, Record, data(FileSortedIDs)); + Record.push_back(FileGroupedDeclIDs.size()); + Stream.EmitRecordWithBlob(AbbrevCode, Record, bytes(FileGroupedDeclIDs)); } void ASTWriter::WriteComments() { @@ -2988,13 +2948,12 @@ void ASTWriter::WriteSelectors(Sema &SemaRef) { // Create the on-disk hash table representation. We walk through every // selector we've seen and look it up in the method pool. 
SelectorOffsets.resize(NextSelectorID - FirstSelectorID); - for (llvm::DenseMap<Selector, SelectorID>::iterator - I = SelectorIDs.begin(), E = SelectorIDs.end(); - I != E; ++I) { - Selector S = I->first; + for (auto &SelectorAndID : SelectorIDs) { + Selector S = SelectorAndID.first; + SelectorID ID = SelectorAndID.second; Sema::GlobalMethodPool::iterator F = SemaRef.MethodPool.find(S); ASTMethodPoolTrait::data_type Data = { - I->second, + ID, ObjCMethodList(), ObjCMethodList() }; @@ -3004,7 +2963,7 @@ void ASTWriter::WriteSelectors(Sema &SemaRef) { } // Only write this selector if it's not in an existing AST or something // changed. - if (Chain && I->second < FirstSelectorID) { + if (Chain && ID < FirstSelectorID) { // Selector already exists. Did it change? bool changed = false; for (ObjCMethodList *M = &Data.Instance; @@ -3051,7 +3010,7 @@ void ASTWriter::WriteSelectors(Sema &SemaRef) { Record.push_back(METHOD_POOL); Record.push_back(BucketOffset); Record.push_back(NumTableEntries); - Stream.EmitRecordWithBlob(MethodPoolAbbrev, Record, MethodPool.str()); + Stream.EmitRecordWithBlob(MethodPoolAbbrev, Record, MethodPool); // Create a blob abbreviation for the selector table offsets. Abbrev = new BitCodeAbbrev(); @@ -3067,7 +3026,7 @@ void ASTWriter::WriteSelectors(Sema &SemaRef) { Record.push_back(SelectorOffsets.size()); Record.push_back(FirstSelectorID - NUM_PREDEF_SELECTOR_IDS); Stream.EmitRecordWithBlob(SelectorOffsetAbbrev, Record, - data(SelectorOffsets)); + bytes(SelectorOffsets)); } } @@ -3082,11 +3041,9 @@ void ASTWriter::WriteReferencedSelectorsPool(Sema &SemaRef) { // Note: this writes out all references even for a dependent AST. But it is // very tricky to fix, and given that @selector shouldn't really appear in // headers, probably not worth it. It's not a correctness issue. - for (DenseMap<Selector, SourceLocation>::iterator S = - SemaRef.ReferencedSelectors.begin(), - E = SemaRef.ReferencedSelectors.end(); S != E; ++S) { - Selector Sel = (*S).first; - SourceLocation Loc = (*S).second; + for (auto &SelectorAndLocation : SemaRef.ReferencedSelectors) { + Selector Sel = SelectorAndLocation.first; + SourceLocation Loc = SelectorAndLocation.second; AddSelectorRef(Sel, Record); AddSourceLocation(Loc, Record); } @@ -3097,174 +3054,59 @@ void ASTWriter::WriteReferencedSelectorsPool(Sema &SemaRef) { // Identifier Table Serialization //===----------------------------------------------------------------------===// +/// Determine the declaration that should be put into the name lookup table to +/// represent the given declaration in this module. This is usually D itself, +/// but if D was imported and merged into a local declaration, we want the most +/// recent local declaration instead. The chosen declaration will be the most +/// recent declaration in any module that imports this one. +static NamedDecl *getDeclForLocalLookup(const LangOptions &LangOpts, + NamedDecl *D) { + if (!LangOpts.Modules || !D->isFromASTFile()) + return D; + + if (Decl *Redecl = D->getPreviousDecl()) { + // For Redeclarable decls, a prior declaration might be local. + for (; Redecl; Redecl = Redecl->getPreviousDecl()) { + if (!Redecl->isFromASTFile()) + return cast<NamedDecl>(Redecl); + // If we find a decl from a (chained-)PCH stop since we won't find a + // local one. + if (D->getOwningModuleID() == 0) + break; + } + } else if (Decl *First = D->getCanonicalDecl()) { + // For Mergeable decls, the first decl might be local. 
+ if (!First->isFromASTFile()) + return cast<NamedDecl>(First); + } + + // All declarations are imported. Our most recent declaration will also be + // the most recent one in anyone who imports us. + return D; +} + namespace { class ASTIdentifierTableTrait { ASTWriter &Writer; Preprocessor &PP; IdentifierResolver &IdResolver; - bool IsModule; - /// \brief Determines whether this is an "interesting" identifier - /// that needs a full IdentifierInfo structure written into the hash - /// table. - bool isInterestingIdentifier(IdentifierInfo *II, MacroDirective *&Macro) { - if (II->isPoisoned() || + /// \brief Determines whether this is an "interesting" identifier that needs a + /// full IdentifierInfo structure written into the hash table. Notably, this + /// doesn't check whether the name has macros defined; use PublicMacroIterator + /// to check that. + bool isInterestingIdentifier(IdentifierInfo *II, uint64_t MacroOffset) { + if (MacroOffset || + II->isPoisoned() || II->isExtensionToken() || II->getObjCOrBuiltinID() || II->hasRevertedTokenIDToIdentifier() || II->getFETokenInfo<void>()) return true; - return hadMacroDefinition(II, Macro); - } - - bool hadMacroDefinition(IdentifierInfo *II, MacroDirective *&Macro) { - if (!II->hadMacroDefinition()) - return false; - - if (Macro || (Macro = PP.getMacroDirectiveHistory(II))) { - if (!IsModule) - return !shouldIgnoreMacro(Macro, IsModule, PP); - - MacroState State; - if (getFirstPublicSubmoduleMacro(Macro, State)) - return true; - } - return false; } - enum class SubmoduleMacroState { - /// We've seen nothing about this macro. - None, - /// We've seen a public visibility directive. - Public, - /// We've either exported a macro for this module or found that the - /// module's definition of this macro is private. - Done - }; - typedef llvm::DenseMap<SubmoduleID, SubmoduleMacroState> MacroState; - - MacroDirective * - getFirstPublicSubmoduleMacro(MacroDirective *MD, MacroState &State) { - if (MacroDirective *NextMD = getPublicSubmoduleMacro(MD, State)) - return NextMD; - return nullptr; - } - - MacroDirective * - getNextPublicSubmoduleMacro(MacroDirective *MD, MacroState &State) { - if (MacroDirective *NextMD = - getPublicSubmoduleMacro(MD->getPrevious(), State)) - return NextMD; - return nullptr; - } - - /// \brief Traverses the macro directives history and returns the next - /// public macro definition or undefinition that has not been found so far. - /// - /// A macro that is defined in submodule A and undefined in submodule B - /// will still be considered as defined/exported from submodule A. - MacroDirective *getPublicSubmoduleMacro(MacroDirective *MD, - MacroState &State) { - if (!MD) - return nullptr; - - Optional<bool> IsPublic; - for (; MD; MD = MD->getPrevious()) { - // Once we hit an ignored macro, we're done: the rest of the chain - // will all be ignored macros. - if (shouldIgnoreMacro(MD, IsModule, PP)) - break; - - // If this macro was imported, re-export it. - if (MD->isImported()) - return MD; - - SubmoduleID ModID = getSubmoduleID(MD); - auto &S = State[ModID]; - assert(ModID && "found macro in no submodule"); - - if (S == SubmoduleMacroState::Done) - continue; - - if (auto *VisMD = dyn_cast<VisibilityMacroDirective>(MD)) { - // The latest visibility directive for a name in a submodule affects all - // the directives that come before it. - if (S == SubmoduleMacroState::None) - S = VisMD->isPublic() ? 
SubmoduleMacroState::Public - : SubmoduleMacroState::Done; - } else { - S = SubmoduleMacroState::Done; - return MD; - } - } - - return nullptr; - } - - ArrayRef<SubmoduleID> - getOverriddenSubmodules(MacroDirective *MD, - SmallVectorImpl<SubmoduleID> &ScratchSpace) { - assert(!isa<VisibilityMacroDirective>(MD) && - "only #define and #undef can override"); - if (MD->isImported()) - return MD->getOverriddenModules(); - - ScratchSpace.clear(); - SubmoduleID ModID = getSubmoduleID(MD); - for (MD = MD->getPrevious(); MD; MD = MD->getPrevious()) { - if (shouldIgnoreMacro(MD, IsModule, PP)) - break; - - // If this is a definition from a submodule import, that submodule's - // definition is overridden by the definition or undefinition that we - // started with. - if (MD->isImported()) { - if (auto *DefMD = dyn_cast<DefMacroDirective>(MD)) { - SubmoduleID DefModuleID = DefMD->getInfo()->getOwningModuleID(); - assert(DefModuleID && "imported macro has no owning module"); - ScratchSpace.push_back(DefModuleID); - } else if (auto *UndefMD = dyn_cast<UndefMacroDirective>(MD)) { - // If we override a #undef, we override anything that #undef overrides. - // We don't need to override it, since an active #undef doesn't affect - // the meaning of a macro. - auto Overrides = UndefMD->getOverriddenModules(); - ScratchSpace.insert(ScratchSpace.end(), - Overrides.begin(), Overrides.end()); - } - } - - // Stop once we leave the original macro's submodule. - // - // Either this submodule #included another submodule of the same - // module or it just happened to be built after the other module. - // In the former case, we override the submodule's macro. - // - // FIXME: In the latter case, we shouldn't do so, but we can't tell - // these cases apart. - // - // FIXME: We can leave this submodule and re-enter it if it #includes a - // header within a different submodule of the same module. In such cases - // the overrides list will be incomplete. 
- SubmoduleID DirectiveModuleID = getSubmoduleID(MD); - if (DirectiveModuleID != ModID) { - if (DirectiveModuleID && !MD->isImported()) - ScratchSpace.push_back(DirectiveModuleID); - break; - } - } - - std::sort(ScratchSpace.begin(), ScratchSpace.end()); - ScratchSpace.erase(std::unique(ScratchSpace.begin(), ScratchSpace.end()), - ScratchSpace.end()); - return ScratchSpace; - } - - SubmoduleID getSubmoduleID(MacroDirective *MD) { - return Writer.inferSubmoduleIDFromLocation(MD->getLocation()); - } - public: typedef IdentifierInfo* key_type; typedef key_type key_type_ref; @@ -3275,9 +3117,9 @@ public: typedef unsigned hash_value_type; typedef unsigned offset_type; - ASTIdentifierTableTrait(ASTWriter &Writer, Preprocessor &PP, - IdentifierResolver &IdResolver, bool IsModule) - : Writer(Writer), PP(PP), IdResolver(IdResolver), IsModule(IsModule) { } + ASTIdentifierTableTrait(ASTWriter &Writer, Preprocessor &PP, + IdentifierResolver &IdResolver) + : Writer(Writer), PP(PP), IdResolver(IdResolver) {} static hash_value_type ComputeHash(const IdentifierInfo* II) { return llvm::HashString(II->getName()); @@ -3287,25 +3129,12 @@ public: EmitKeyDataLength(raw_ostream& Out, IdentifierInfo* II, IdentID ID) { unsigned KeyLen = II->getLength() + 1; unsigned DataLen = 4; // 4 bytes for the persistent ID << 1 - MacroDirective *Macro = nullptr; - if (isInterestingIdentifier(II, Macro)) { + auto MacroOffset = Writer.getMacroDirectivesOffset(II); + if (isInterestingIdentifier(II, MacroOffset)) { DataLen += 2; // 2 bytes for builtin ID DataLen += 2; // 2 bytes for flags - if (hadMacroDefinition(II, Macro)) { + if (MacroOffset) DataLen += 4; // MacroDirectives offset. - if (IsModule) { - MacroState State; - SmallVector<SubmoduleID, 16> Scratch; - for (MacroDirective *MD = getFirstPublicSubmoduleMacro(Macro, State); - MD; MD = getNextPublicSubmoduleMacro(MD, State)) { - DataLen += 4; // MacroInfo ID or ModuleID. - if (unsigned NumOverrides = - getOverriddenSubmodules(MD, Scratch).size()) - DataLen += 4 * (1 + NumOverrides); - } - DataLen += 4; // 0 terminator. - } - } for (IdentifierResolver::iterator D = IdResolver.begin(II), DEnd = IdResolver.end(); @@ -3315,6 +3144,7 @@ public: using namespace llvm::support; endian::Writer<little> LE(Out); + assert((uint16_t)DataLen == DataLen && (uint16_t)KeyLen == KeyLen); LE.write<uint16_t>(DataLen); // We emit the key length after the data length so that every // string is preceded by a 16-bit length. 
This matches the PTH @@ -3331,25 +3161,13 @@ public: Out.write(II->getNameStart(), KeyLen); } - static void emitMacroOverrides(raw_ostream &Out, - ArrayRef<SubmoduleID> Overridden) { - if (!Overridden.empty()) { - using namespace llvm::support; - endian::Writer<little> LE(Out); - LE.write<uint32_t>(Overridden.size() | 0x80000000U); - for (unsigned I = 0, N = Overridden.size(); I != N; ++I) { - assert(Overridden[I] && "zero module ID for override"); - LE.write<uint32_t>(Overridden[I]); - } - } - } - void EmitData(raw_ostream& Out, IdentifierInfo* II, IdentID ID, unsigned) { using namespace llvm::support; endian::Writer<little> LE(Out); - MacroDirective *Macro = nullptr; - if (!isInterestingIdentifier(II, Macro)) { + + auto MacroOffset = Writer.getMacroDirectivesOffset(II); + if (!isInterestingIdentifier(II, MacroOffset)) { LE.write<uint32_t>(ID << 1); return; } @@ -3359,77 +3177,29 @@ public: assert((Bits & 0xffff) == Bits && "ObjCOrBuiltinID too big for ASTReader."); LE.write<uint16_t>(Bits); Bits = 0; - bool HadMacroDefinition = hadMacroDefinition(II, Macro); + bool HadMacroDefinition = MacroOffset != 0; Bits = (Bits << 1) | unsigned(HadMacroDefinition); - Bits = (Bits << 1) | unsigned(IsModule); Bits = (Bits << 1) | unsigned(II->isExtensionToken()); Bits = (Bits << 1) | unsigned(II->isPoisoned()); Bits = (Bits << 1) | unsigned(II->hasRevertedTokenIDToIdentifier()); Bits = (Bits << 1) | unsigned(II->isCPlusPlusOperatorKeyword()); LE.write<uint16_t>(Bits); - if (HadMacroDefinition) { - LE.write<uint32_t>(Writer.getMacroDirectivesOffset(II)); - if (IsModule) { - // Write the IDs of macros coming from different submodules. - MacroState State; - SmallVector<SubmoduleID, 16> Scratch; - for (MacroDirective *MD = getFirstPublicSubmoduleMacro(Macro, State); - MD; MD = getNextPublicSubmoduleMacro(MD, State)) { - if (DefMacroDirective *DefMD = dyn_cast<DefMacroDirective>(MD)) { - // FIXME: If this macro directive was created by #pragma pop_macros, - // or if it was created implicitly by resolving conflicting macros, - // it may be for a different submodule from the one in the MacroInfo - // object. If so, we should write out its owning ModuleID. - MacroID InfoID = Writer.getMacroID(DefMD->getInfo()); - assert(InfoID); - LE.write<uint32_t>(InfoID << 1); - } else { - auto *UndefMD = cast<UndefMacroDirective>(MD); - SubmoduleID Mod = UndefMD->isImported() - ? UndefMD->getOwningModuleID() - : getSubmoduleID(UndefMD); - LE.write<uint32_t>((Mod << 1) | 1); - } - emitMacroOverrides(Out, getOverriddenSubmodules(MD, Scratch)); - } - LE.write<uint32_t>(0xdeadbeef); - } - } + if (HadMacroDefinition) + LE.write<uint32_t>(MacroOffset); // Emit the declaration IDs in reverse order, because the // IdentifierResolver provides the declarations as they would be // visible (e.g., the function "stat" would come before the struct - // "stat"), but the ASTReader adds declarations to the end of the list - // (so we need to see the struct "status" before the function "status"). + // "stat"), but the ASTReader adds declarations to the end of the list + // (so we need to see the struct "stat" before the function "stat"). // Only emit declarations that aren't from a chained PCH, though. 
- SmallVector<Decl *, 16> Decls(IdResolver.begin(II), - IdResolver.end()); - for (SmallVectorImpl<Decl *>::reverse_iterator D = Decls.rbegin(), - DEnd = Decls.rend(); + SmallVector<NamedDecl *, 16> Decls(IdResolver.begin(II), IdResolver.end()); + for (SmallVectorImpl<NamedDecl *>::reverse_iterator D = Decls.rbegin(), + DEnd = Decls.rend(); D != DEnd; ++D) - LE.write<uint32_t>(Writer.getDeclID(getMostRecentLocalDecl(*D))); - } - - /// \brief Returns the most recent local decl or the given decl if there are - /// no local ones. The given decl is assumed to be the most recent one. - Decl *getMostRecentLocalDecl(Decl *Orig) { - // The only way a "from AST file" decl would be more recent from a local one - // is if it came from a module. - if (!PP.getLangOpts().Modules) - return Orig; - - // Look for a local in the decl chain. - for (Decl *D = Orig; D; D = D->getPreviousDecl()) { - if (!D->isFromASTFile()) - return D; - // If we come up a decl from a (chained-)PCH stop since we won't find a - // local one. - if (D->getOwningModuleID() == 0) - break; - } - - return Orig; + LE.write<uint32_t>( + Writer.getDeclID(getDeclForLocalLookup(PP.getLangOpts(), *D))); } }; } // end anonymous namespace @@ -3448,29 +3218,33 @@ void ASTWriter::WriteIdentifierTable(Preprocessor &PP, // strings. { llvm::OnDiskChainedHashTableGenerator<ASTIdentifierTableTrait> Generator; - ASTIdentifierTableTrait Trait(*this, PP, IdResolver, IsModule); + ASTIdentifierTableTrait Trait(*this, PP, IdResolver); // Look for any identifiers that were named while processing the // headers, but are otherwise not needed. We add these to the hash // table to enable checking of the predefines buffer in the case // where the user adds new macro definitions when building the AST // file. + SmallVector<const IdentifierInfo *, 128> IIs; for (IdentifierTable::iterator ID = PP.getIdentifierTable().begin(), IDEnd = PP.getIdentifierTable().end(); ID != IDEnd; ++ID) - getIdentifierRef(ID->second); + IIs.push_back(ID->second); + // Sort the identifiers lexicographically before getting references for them + // so that their order is stable. + std::sort(IIs.begin(), IIs.end(), llvm::less_ptr<IdentifierInfo>()); + for (const IdentifierInfo *II : IIs) + getIdentifierRef(II); // Create the on-disk hash table representation. We only store offsets // for identifiers that appear here for the first time. IdentifierOffsets.resize(NextIdentID - FirstIdentID); - for (llvm::DenseMap<const IdentifierInfo *, IdentID>::iterator - ID = IdentifierIDs.begin(), IDEnd = IdentifierIDs.end(); - ID != IDEnd; ++ID) { - assert(ID->first && "NULL identifier in identifier table"); - if (!Chain || !ID->first->isFromAST() || - ID->first->hasChangedSinceDeserialization()) - Generator.insert(const_cast<IdentifierInfo *>(ID->first), ID->second, - Trait); + for (auto IdentIDPair : IdentifierIDs) { + IdentifierInfo *II = const_cast<IdentifierInfo *>(IdentIDPair.first); + IdentID ID = IdentIDPair.second; + assert(II && "NULL identifier in identifier table"); + if (!Chain || !II->isFromAST() || II->hasChangedSinceDeserialization()) + Generator.insert(II, ID, Trait); } // Create the on-disk hash table in a buffer.
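A pattern recurs throughout this patch: instead of iterating a pointer-keyed DenseMap directly, the writer now collects the entries, sorts them by name (std::sort with llvm::less_ptr here for identifiers, and likewise for macro names and the file-sorted decl IDs), and only then assigns IDs or emits table entries, since hash-map iteration order varies from run to run and would make the emitted AST file non-deterministic. A minimal standalone sketch of the idea (the Symbol type and assignStableIDs name are illustrative, not part of the patch):

#include <algorithm>
#include <string>
#include <vector>

struct Symbol {
  std::string Name;
  unsigned ID = 0;
};

// Sort the collected entries by a stable key (the name) before handing out
// IDs, so every build numbers them identically; this mirrors the patch's
// std::sort(..., llvm::less_ptr<IdentifierInfo>()) calls.
void assignStableIDs(std::vector<Symbol *> &Syms) {
  std::sort(Syms.begin(), Syms.end(),
            [](const Symbol *L, const Symbol *R) { return L->Name < R->Name; });
  unsigned NextID = 1; // keep 0 free as an invalid/sentinel ID, as the writer does
  for (Symbol *S : Syms)
    S->ID = NextID++;
}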
@@ -3478,7 +3252,6 @@ void ASTWriter::WriteIdentifierTable(Preprocessor &PP, uint32_t BucketOffset; { using namespace llvm::support; - ASTIdentifierTableTrait Trait(*this, PP, IdResolver, IsModule); llvm::raw_svector_ostream Out(IdentifierTable); // Make sure that no bucket is at offset 0 endian::Writer<little>(Out).write<uint32_t>(0); @@ -3496,7 +3269,7 @@ void ASTWriter::WriteIdentifierTable(Preprocessor &PP, RecordData Record; Record.push_back(IDENTIFIER_TABLE); Record.push_back(BucketOffset); - Stream.EmitRecordWithBlob(IDTableAbbrev, Record, IdentifierTable.str()); + Stream.EmitRecordWithBlob(IDTableAbbrev, Record, IdentifierTable); } // Write the offsets table for identifier IDs. @@ -3517,38 +3290,13 @@ void ASTWriter::WriteIdentifierTable(Preprocessor &PP, Record.push_back(IdentifierOffsets.size()); Record.push_back(FirstIdentID - NUM_PREDEF_IDENT_IDS); Stream.EmitRecordWithBlob(IdentifierOffsetAbbrev, Record, - data(IdentifierOffsets)); + bytes(IdentifierOffsets)); } //===----------------------------------------------------------------------===// // DeclContext's Name Lookup Table Serialization //===----------------------------------------------------------------------===// -/// Determine the declaration that should be put into the name lookup table to -/// represent the given declaration in this module. This is usually D itself, -/// but if D was imported and merged into a local declaration, we want the most -/// recent local declaration instead. The chosen declaration will be the most -/// recent declaration in any module that imports this one. -static NamedDecl *getDeclForLocalLookup(NamedDecl *D) { - if (!D->isFromASTFile()) - return D; - - if (Decl *Redecl = D->getPreviousDecl()) { - // For Redeclarable decls, a prior declaration might be local. - for (; Redecl; Redecl = Redecl->getPreviousDecl()) - if (!Redecl->isFromASTFile()) - return cast<NamedDecl>(Redecl); - } else if (Decl *First = D->getCanonicalDecl()) { - // For Mergeable decls, the first decl might be local. - if (!First->isFromASTFile()) - return cast<NamedDecl>(First); - } - - // All declarations are imported. Our most recent declaration will also be - // the most recent one in anyone who imports us. - return D; -} - namespace { // Trait used for the on-disk hash table used in the method pool. 
class ASTDeclContextNameLookupTrait { @@ -3666,119 +3414,198 @@ public: LE.write<uint16_t>(Lookup.size()); for (DeclContext::lookup_iterator I = Lookup.begin(), E = Lookup.end(); I != E; ++I) - LE.write<uint32_t>(Writer.GetDeclRef(getDeclForLocalLookup(*I))); + LE.write<uint32_t>( + Writer.GetDeclRef(getDeclForLocalLookup(Writer.getLangOpts(), *I))); assert(Out.tell() - Start == DataLen && "Data length is wrong"); } }; } // end anonymous namespace -template<typename Visitor> -static void visitLocalLookupResults(const DeclContext *ConstDC, - bool NeedToReconcileExternalVisibleStorage, - Visitor AddLookupResult) { +bool ASTWriter::isLookupResultExternal(StoredDeclsList &Result, + DeclContext *DC) { + return Result.hasExternalDecls() && DC->NeedToReconcileExternalVisibleStorage; +} + +bool ASTWriter::isLookupResultEntirelyExternal(StoredDeclsList &Result, + DeclContext *DC) { + for (auto *D : Result.getLookupResult()) + if (!getDeclForLocalLookup(getLangOpts(), D)->isFromASTFile()) + return false; + + return true; +} + +uint32_t +ASTWriter::GenerateNameLookupTable(const DeclContext *ConstDC, + llvm::SmallVectorImpl<char> &LookupTable) { + assert(!ConstDC->HasLazyLocalLexicalLookups && + !ConstDC->HasLazyExternalLexicalLookups && + "must call buildLookups first"); + // FIXME: We need to build the lookups table, which is logically const. DeclContext *DC = const_cast<DeclContext*>(ConstDC); assert(DC == DC->getPrimaryContext() && "only primary DC has lookup table"); - SmallVector<DeclarationName, 16> ExternalNames; + // Create the on-disk hash table representation. + llvm::OnDiskChainedHashTableGenerator<ASTDeclContextNameLookupTrait> + Generator; + ASTDeclContextNameLookupTrait Trait(*this); + + // The first step is to collect the declaration names which we need to + // serialize into the name lookup table, and to collect them in a stable + // order. + SmallVector<DeclarationName, 16> Names; + + // We also build up small sets of the constructor and conversion function + // names which are visible. + llvm::SmallSet<DeclarationName, 8> ConstructorNameSet, ConversionNameSet; + for (auto &Lookup : *DC->buildLookup()) { - if (Lookup.second.hasExternalDecls() || - NeedToReconcileExternalVisibleStorage) { - // We don't know for sure what declarations are found by this name, - // because the external source might have a different set from the set - // that are in the lookup map, and we can't update it now without - // risking invalidating our lookup iterator. So add it to a queue to - // deal with later. - ExternalNames.push_back(Lookup.first); + auto &Name = Lookup.first; + auto &Result = Lookup.second; + + // If there are no local declarations in our lookup result, we don't + // need to write an entry for the name at all unless we're rewriting + // the decl context. If we can't write out a lookup set without + // performing more deserialization, just skip this entry. + if (isLookupResultExternal(Result, DC) && !isRewritten(cast<Decl>(DC)) && + isLookupResultEntirelyExternal(Result, DC)) continue; - } - AddLookupResult(Lookup.first, Lookup.second.getLookupResult()); - } + // We also skip empty results. If any of the results could be external and + // the currently available results are empty, then all of the results are + // external and we skip it above. So the only way we get here with empty + // results is when no results could have been external *and* we have + // no results.
+ // + // FIXME: While we might want to start emitting on-disk entries for negative + // lookups into a decl context as an optimization, today we *have* to skip + // them because there are names with empty lookup results in decl contexts + // which we can't emit in any stable ordering: we look up constructors and + // conversion functions in the enclosing namespace scope creating empty + // results for them. This is almost certainly a bug in Clang's name lookup, + // but that is likely to be hard or impossible to fix and so we tolerate it + // here by omitting lookups with empty results. + if (Lookup.second.getLookupResult().empty()) + continue; - // Add the names we needed to defer. Note, this shouldn't add any new decls - // to the list we need to serialize: any new declarations we find here should - // be imported from an external source. - // FIXME: What if the external source isn't an ASTReader? - for (const auto &Name : ExternalNames) - AddLookupResult(Name, DC->lookup(Name)); -} - -void ASTWriter::AddUpdatedDeclContext(const DeclContext *DC) { - if (UpdatedDeclContexts.insert(DC).second && WritingAST) { - // Ensure we emit all the visible declarations. - visitLocalLookupResults(DC, DC->NeedToReconcileExternalVisibleStorage, - [&](DeclarationName Name, - DeclContext::lookup_const_result Result) { - for (auto *Decl : Result) - GetDeclRef(getDeclForLocalLookup(Decl)); - }); + switch (Lookup.first.getNameKind()) { + default: + Names.push_back(Lookup.first); + break; + + case DeclarationName::CXXConstructorName: + assert(isa<CXXRecordDecl>(DC) && + "Cannot have a constructor name outside of a class!"); + ConstructorNameSet.insert(Name); + break; + + case DeclarationName::CXXConversionFunctionName: + assert(isa<CXXRecordDecl>(DC) && + "Cannot have a conversion function name outside of a class!"); + ConversionNameSet.insert(Name); + break; + } } -} -uint32_t -ASTWriter::GenerateNameLookupTable(const DeclContext *DC, - llvm::SmallVectorImpl<char> &LookupTable) { - assert(!DC->LookupPtr.getInt() && "must call buildLookups first"); + // Sort the names into a stable order. + std::sort(Names.begin(), Names.end()); + + if (auto *D = dyn_cast<CXXRecordDecl>(DC)) { + // We need to establish an ordering of constructor and conversion function + // names, and they don't have an intrinsic ordering. + + // First we try the easy case by forming the current context's constructor + // name and adding that name first. This is a very useful optimization to + // avoid walking the lexical declarations in many cases, and it also + // handles the only case where a constructor name can come from some other + // lexical context -- when that name is an implicit constructor merged from + // another declaration in the redecl chain. Any non-implicit constructor or + // conversion function which doesn't occur in all the lexical contexts + // would be an ODR violation. + auto ImplicitCtorName = Context->DeclarationNames.getCXXConstructorName( + Context->getCanonicalType(Context->getRecordType(D))); + if (ConstructorNameSet.erase(ImplicitCtorName)) + Names.push_back(ImplicitCtorName); + + // If we still have constructors or conversion functions, we walk all the + // names in the decl and add the constructors and conversion functions + // which are visible in the order they lexically occur within the context.
+ if (!ConstructorNameSet.empty() || !ConversionNameSet.empty()) + for (Decl *ChildD : cast<CXXRecordDecl>(DC)->decls()) + if (auto *ChildND = dyn_cast<NamedDecl>(ChildD)) { + auto Name = ChildND->getDeclName(); + switch (Name.getNameKind()) { + default: + continue; + + case DeclarationName::CXXConstructorName: + if (ConstructorNameSet.erase(Name)) + Names.push_back(Name); + break; + + case DeclarationName::CXXConversionFunctionName: + if (ConversionNameSet.erase(Name)) + Names.push_back(Name); + break; + } - llvm::OnDiskChainedHashTableGenerator<ASTDeclContextNameLookupTrait> - Generator; - ASTDeclContextNameLookupTrait Trait(*this); + if (ConstructorNameSet.empty() && ConversionNameSet.empty()) + break; + } - // Create the on-disk hash table representation. - DeclarationName ConstructorName; - DeclarationName ConversionName; + assert(ConstructorNameSet.empty() && "Failed to find all of the visible " + "constructors by walking all the " + "lexical members of the context."); + assert(ConversionNameSet.empty() && "Failed to find all of the visible " + "conversion functions by walking all " + "the lexical members of the context."); + } + + // Next we need to do a lookup with each name into this decl context to fully + // populate any results from external sources. We don't actually use the + // results of these lookups because we only want to use the results after all + // results have been loaded and the pointers into them will be stable. + for (auto &Name : Names) + DC->lookup(Name); + + // Now we need to insert the results for each name into the hash table. For + // constructor names and conversion function names, we actually need to merge + // all of the results for them into one list of results each and insert + // those. SmallVector<NamedDecl *, 8> ConstructorDecls; - SmallVector<NamedDecl *, 4> ConversionDecls; + SmallVector<NamedDecl *, 8> ConversionDecls; - visitLocalLookupResults(DC, DC->NeedToReconcileExternalVisibleStorage, - [&](DeclarationName Name, - DeclContext::lookup_result Result) { - if (Result.empty()) - return; + // Now loop over the names, either inserting them or appending for the two + // special cases. + for (auto &Name : Names) { + DeclContext::lookup_result Result = DC->noload_lookup(Name); - // Different DeclarationName values of certain kinds are mapped to - // identical serialized keys, because we don't want to use type - // identifiers in the keys (since type ids are local to the module). switch (Name.getNameKind()) { + default: + Generator.insert(Name, Result, Trait); + break; + case DeclarationName::CXXConstructorName: - // There may be different CXXConstructorName DeclarationName values - // in a DeclContext because a UsingDecl that inherits constructors - // has the DeclarationName of the inherited constructors. - if (!ConstructorName) - ConstructorName = Name; ConstructorDecls.append(Result.begin(), Result.end()); - return; + break; case DeclarationName::CXXConversionFunctionName: - if (!ConversionName) - ConversionName = Name; ConversionDecls.append(Result.begin(), Result.end()); - return; - - default: break; } - - Generator.insert(Name, Result, Trait); - }); - - // Add the constructors. - if (!ConstructorDecls.empty()) { - Generator.insert(ConstructorName, - DeclContext::lookup_result(ConstructorDecls.begin(), - ConstructorDecls.end()), - Trait); } - // Add the conversion functions. 
- if (!ConversionDecls.empty()) { - Generator.insert(ConversionName, - DeclContext::lookup_result(ConversionDecls.begin(), - ConversionDecls.end()), - Trait); - } + // Handle our two special cases if we ended up having any. We arbitrarily use + // the first declaration's name here because the name itself isn't part of + // the key, only the kind of name is used. + if (!ConstructorDecls.empty()) + Generator.insert(ConstructorDecls.front()->getDeclName(), + DeclContext::lookup_result(ConstructorDecls), Trait); + if (!ConversionDecls.empty()) + Generator.insert(ConversionDecls.front()->getDeclName(), + DeclContext::lookup_result(ConversionDecls), Trait); // Create the on-disk hash table in a buffer. llvm::raw_svector_ostream Out(LookupTable); @@ -3798,9 +3625,8 @@ uint64_t ASTWriter::WriteDeclContextVisibleBlock(ASTContext &Context, if (DC->getPrimaryContext() != DC) return 0; - // Since there is no name lookup into functions or methods, don't bother to - // build a visible-declarations table for these entities. - if (DC->isFunctionOrMethod()) + // Skip contexts which don't support name lookup. + if (!DC->isLookupContext()) return 0; // If not in C++, we perform name lookup for the translation unit via the @@ -3827,7 +3653,7 @@ uint64_t ASTWriter::WriteDeclContextVisibleBlock(ASTContext &Context, Record.push_back(DECL_CONTEXT_VISIBLE); Record.push_back(BucketOffset); Stream.EmitRecordWithBlob(DeclContextVisibleLookupAbbrev, Record, - LookupTable.str()); + LookupTable); ++NumVisibleDeclContexts; return Offset; } @@ -3852,7 +3678,7 @@ void ASTWriter::WriteDeclContextVisibleUpdate(const DeclContext *DC) { Record.push_back(UPDATE_VISIBLE); Record.push_back(getDeclID(cast<Decl>(DC))); Record.push_back(BucketOffset); - Stream.EmitRecordWithBlob(UpdateVisibleAbbrev, Record, LookupTable.str()); + Stream.EmitRecordWithBlob(UpdateVisibleAbbrev, Record, LookupTable); } /// \brief Write an FP_PRAGMA_OPTIONS block for the given FPOptions. @@ -3902,18 +3728,6 @@ void ASTWriter::WriteRedeclarations() { } } - if (!First->isFromASTFile() && Chain) { - Decl *FirstFromAST = MostRecent; - for (Decl *Prev = MostRecent; Prev; Prev = Prev->getPreviousDecl()) { - if (Prev->isFromASTFile()) - FirstFromAST = Prev; - } - - // FIXME: Do we need to do this for the first declaration from each - // redeclaration chain that was merged into this one? - Chain->MergedDecls[FirstFromAST].push_back(getDeclID(First)); - } - LocalRedeclChains[Offset] = Size; // Reverse the set of local redeclarations, so that we store them in @@ -4008,25 +3822,6 @@ void ASTWriter::WriteObjCCategories() { Stream.EmitRecord(OBJC_CATEGORIES, Categories); } -void ASTWriter::WriteMergedDecls() { - if (!Chain || Chain->MergedDecls.empty()) - return; - - RecordData Record; - for (ASTReader::MergedDeclsMap::iterator I = Chain->MergedDecls.begin(), - IEnd = Chain->MergedDecls.end(); - I != IEnd; ++I) { - DeclID CanonID = I->first->isFromASTFile()? 
I->first->getGlobalID() - : GetDeclRef(I->first); - assert(CanonID && "Merged declaration not known?"); - - Record.push_back(CanonID); - Record.push_back(I->second.size()); - Record.append(I->second.begin(), I->second.end()); - } - Stream.EmitRecord(MERGED_DECLARATIONS, Record); -} - void ASTWriter::WriteLateParsedTemplates(Sema &SemaRef) { Sema::LateParsedTemplateMapT &LPTMap = SemaRef.LateParsedTemplateMap; @@ -4034,11 +3829,10 @@ void ASTWriter::WriteLateParsedTemplates(Sema &SemaRef) { return; RecordData Record; - for (Sema::LateParsedTemplateMapT::iterator It = LPTMap.begin(), - ItEnd = LPTMap.end(); - It != ItEnd; ++It) { - LateParsedTemplate *LPT = It->second; - AddDeclRef(It->first, Record); + for (auto LPTMapEntry : LPTMap) { + const FunctionDecl *FD = LPTMapEntry.first; + LateParsedTemplate *LPT = LPTMapEntry.second; + AddDeclRef(FD, Record); AddDeclRef(LPT->D, Record); Record.push_back(LPT->Toks.size()); @@ -4175,7 +3969,8 @@ ASTWriter::ASTWriter(llvm::BitstreamWriter &Stream) FirstSelectorID(NUM_PREDEF_SELECTOR_IDS), NextSelectorID(FirstSelectorID), CollectedStmts(&StmtsToEmit), NumStatements(0), NumMacros(0), NumLexicalDeclContexts(0), NumVisibleDeclContexts(0), - NextCXXBaseSpecifiersID(1), TypeExtQualAbbrev(0), + NextCXXBaseSpecifiersID(1), NextCXXCtorInitializersID(1), + TypeExtQualAbbrev(0), TypeFunctionProtoAbbrev(0), DeclParmVarAbbrev(0), DeclContextLexicalAbbrev(0), DeclContextVisibleLookupAbbrev(0), UpdateVisibleAbbrev(0), DeclRecordAbbrev(0), DeclTypedefAbbrev(0), @@ -4188,6 +3983,11 @@ ASTWriter::~ASTWriter() { llvm::DeleteContainerSeconds(FileDeclIDs); } +const LangOptions &ASTWriter::getLangOpts() const { + assert(WritingAST && "can't determine lang opts when not writing AST"); + return Context->getLangOpts(); +} + void ASTWriter::WriteAST(Sema &SemaRef, const std::string &OutputFile, Module *WritingModule, StringRef isysroot, @@ -4258,47 +4058,8 @@ void ASTWriter::WriteASTCore(Sema &SemaRef, DeclIDs[Context.ObjCInstanceTypeDecl] = PREDEF_DECL_OBJC_INSTANCETYPE_ID; if (Context.BuiltinVaListDecl) DeclIDs[Context.getBuiltinVaListDecl()] = PREDEF_DECL_BUILTIN_VA_LIST_ID; - - if (!Chain) { - // Make sure that we emit IdentifierInfos (and any attached - // declarations) for builtins. We don't need to do this when we're - // emitting chained PCH files, because all of the builtins will be - // in the original PCH file. - // FIXME: Modules won't like this at all. - IdentifierTable &Table = PP.getIdentifierTable(); - SmallVector<const char *, 32> BuiltinNames; - if (!Context.getLangOpts().NoBuiltin) { - Context.BuiltinInfo.GetBuiltinNames(BuiltinNames); - } - for (unsigned I = 0, N = BuiltinNames.size(); I != N; ++I) - getIdentifierRef(&Table.get(BuiltinNames[I])); - } - - // If there are any out-of-date identifiers, bring them up to date. - if (ExternalPreprocessorSource *ExtSource = PP.getExternalSource()) { - // Find out-of-date identifiers. - SmallVector<IdentifierInfo *, 4> OutOfDate; - for (IdentifierTable::iterator ID = PP.getIdentifierTable().begin(), - IDEnd = PP.getIdentifierTable().end(); - ID != IDEnd; ++ID) { - if (ID->second->isOutOfDate()) - OutOfDate.push_back(ID->second); - } - - // Update the out-of-date identifiers. - for (unsigned I = 0, N = OutOfDate.size(); I != N; ++I) { - ExtSource->updateOutOfDateIdentifier(*OutOfDate[I]); - } - } - - // If we saw any DeclContext updates before we started writing the AST file, - // make sure all visible decls in those DeclContexts are written out. 
- if (!UpdatedDeclContexts.empty()) { - auto OldUpdatedDeclContexts = std::move(UpdatedDeclContexts); - UpdatedDeclContexts.clear(); - for (auto *DC : OldUpdatedDeclContexts) - AddUpdatedDeclContext(DC); - } + if (Context.ExternCContext) + DeclIDs[Context.ExternCContext] = PREDEF_DECL_EXTERN_C_CONTEXT_ID; // Build a record containing all of the tentative definitions in this file, in // TentativeDefinitions order. Generally, this record will be empty for @@ -4322,31 +4083,15 @@ void ASTWriter::WriteASTCore(Sema &SemaRef, // entire table, since later PCH files in a PCH chain are only interested in // the results at the end of the chain. RecordData WeakUndeclaredIdentifiers; - if (!SemaRef.WeakUndeclaredIdentifiers.empty()) { - for (llvm::DenseMap<IdentifierInfo*,WeakInfo>::iterator - I = SemaRef.WeakUndeclaredIdentifiers.begin(), - E = SemaRef.WeakUndeclaredIdentifiers.end(); I != E; ++I) { - AddIdentifierRef(I->first, WeakUndeclaredIdentifiers); - AddIdentifierRef(I->second.getAlias(), WeakUndeclaredIdentifiers); - AddSourceLocation(I->second.getLocation(), WeakUndeclaredIdentifiers); - WeakUndeclaredIdentifiers.push_back(I->second.getUsed()); - } + for (auto &WeakUndeclaredIdentifier : SemaRef.WeakUndeclaredIdentifiers) { + IdentifierInfo *II = WeakUndeclaredIdentifier.first; + WeakInfo &WI = WeakUndeclaredIdentifier.second; + AddIdentifierRef(II, WeakUndeclaredIdentifiers); + AddIdentifierRef(WI.getAlias(), WeakUndeclaredIdentifiers); + AddSourceLocation(WI.getLocation(), WeakUndeclaredIdentifiers); + WeakUndeclaredIdentifiers.push_back(WI.getUsed()); } - // Build a record containing all of the locally-scoped extern "C" - // declarations in this header file. Generally, this record will be - // empty. - RecordData LocallyScopedExternCDecls; - // FIXME: This is filling in the AST file in densemap order which is - // nondeterminstic! - for (llvm::DenseMap<DeclarationName, NamedDecl *>::iterator - TD = SemaRef.LocallyScopedExternCDecls.begin(), - TDEnd = SemaRef.LocallyScopedExternCDecls.end(); - TD != TDEnd; ++TD) { - if (!TD->second->isFromASTFile()) - AddDeclRef(TD->second, LocallyScopedExternCDecls); - } - // Build a record containing all of the ext_vector declarations. RecordData ExtVectorDecls; AddLazyVectorDecls(*this, SemaRef.ExtVectorDecls, ExtVectorDecls); @@ -4366,10 +4111,6 @@ void ASTWriter::WriteASTCore(Sema &SemaRef, for (const TypedefNameDecl *TD : SemaRef.UnusedLocalTypedefNameCandidates) AddDeclRef(TD, UnusedLocalTypedefNameCandidates); - // Build a record containing all of dynamic classes declarations. - RecordData DynamicClasses; - AddLazyVectorDecls(*this, SemaRef.DynamicClasses, DynamicClasses); - // Build a record containing all of pending implicit instantiations. RecordData PendingInstantiations; for (std::deque<Sema::PendingImplicitInstantiation>::iterator @@ -4414,6 +4155,20 @@ void ASTWriter::WriteASTCore(Sema &SemaRef, AddSourceLocation(I->second, UndefinedButUsed); } + // Build a record containing all delete-expressions that we would like to + // analyze later in AST. 
+ RecordData DeleteExprsToAnalyze; + + for (const auto &DeleteExprsInfo : + SemaRef.getMismatchingDeleteExpressions()) { + AddDeclRef(DeleteExprsInfo.first, DeleteExprsToAnalyze); + DeleteExprsToAnalyze.push_back(DeleteExprsInfo.second.size()); + for (const auto &DeleteLoc : DeleteExprsInfo.second) { + AddSourceLocation(DeleteLoc.first, DeleteExprsToAnalyze); + DeleteExprsToAnalyze.push_back(DeleteLoc.second); + } + } + // Write the control block WriteControlBlock(PP, Context, isysroot, OutputFile); @@ -4443,7 +4198,7 @@ void ASTWriter::WriteASTCore(Sema &SemaRef, Record.clear(); Record.push_back(TU_UPDATE_LEXICAL); Stream.EmitRecordWithBlob(TuUpdateLexicalAbbrev, Record, - data(NewGlobalDecls)); + bytes(NewGlobalDecls)); // And a visible updates block for the translation unit. Abv = new llvm::BitCodeAbbrev(); @@ -4453,6 +4208,10 @@ void ASTWriter::WriteASTCore(Sema &SemaRef, Abv->Add(llvm::BitCodeAbbrevOp(llvm::BitCodeAbbrevOp::Blob)); UpdateVisibleAbbrev = Stream.EmitAbbrev(Abv); WriteDeclContextVisibleUpdate(TU); + + // If we have any extern "C" names, write out a visible update for them. + if (Context.ExternCContext) + WriteDeclContextVisibleUpdate(Context.ExternCContext); // If the translation unit has an anonymous namespace, and we don't already // have an update block for it, write it as an update block. @@ -4485,16 +4244,21 @@ void ASTWriter::WriteASTCore(Sema &SemaRef, // Make sure all decls associated with an identifier are registered for // serialization. + llvm::SmallVector<const IdentifierInfo*, 256> IIs; for (IdentifierTable::iterator ID = PP.getIdentifierTable().begin(), IDEnd = PP.getIdentifierTable().end(); ID != IDEnd; ++ID) { const IdentifierInfo *II = ID->second; - if (!Chain || !II->isFromAST() || II->hasChangedSinceDeserialization()) { - for (IdentifierResolver::iterator D = SemaRef.IdResolver.begin(II), - DEnd = SemaRef.IdResolver.end(); - D != DEnd; ++D) { - GetDeclRef(*D); - } + if (!Chain || !II->isFromAST() || II->hasChangedSinceDeserialization()) + IIs.push_back(II); + } + // Sort the identifiers to visit based on their name. + std::sort(IIs.begin(), IIs.end(), llvm::less_ptr<IdentifierInfo>()); + for (const IdentifierInfo *II : IIs) { + for (IdentifierResolver::iterator D = SemaRef.IdResolver.begin(II), + DEnd = SemaRef.IdResolver.end(); + D != DEnd; ++D) { + GetDeclRef(*D); } } @@ -4601,6 +4365,7 @@ void ASTWriter::WriteASTCore(Sema &SemaRef, if (!DeclUpdatesOffsetsRecord.empty()) Stream.EmitRecord(DECL_UPDATE_OFFSETS, DeclUpdatesOffsetsRecord); WriteCXXBaseSpecifiersOffsets(); + WriteCXXCtorInitializersOffsets(); WriteFileDeclIDsMap(); WriteSourceManagerBlock(Context.getSourceManager(), PP); @@ -4637,11 +4402,6 @@ void ASTWriter::WriteASTCore(Sema &SemaRef, Stream.EmitRecord(WEAK_UNDECLARED_IDENTIFIERS, WeakUndeclaredIdentifiers); - // Write the record containing locally-scoped extern "C" definitions. - if (!LocallyScopedExternCDecls.empty()) - Stream.EmitRecord(LOCALLY_SCOPED_EXTERN_C_DECLS, - LocallyScopedExternCDecls); - // Write the record containing ext_vector type names. if (!ExtVectorDecls.empty()) Stream.EmitRecord(EXT_VECTOR_DECLS, ExtVectorDecls); @@ -4650,10 +4410,6 @@ void ASTWriter::WriteASTCore(Sema &SemaRef, if (!VTableUses.empty()) Stream.EmitRecord(VTABLE_USES, VTableUses); - // Write the record containing dynamic classes declarations. - if (!DynamicClasses.empty()) - Stream.EmitRecord(DYNAMIC_CLASSES, DynamicClasses); - // Write the record containing potentially unused local typedefs. 
if (!UnusedLocalTypedefNameCandidates.empty()) Stream.EmitRecord(UNUSED_LOCAL_TYPEDEF_NAME_CANDIDATES, @@ -4682,7 +4438,10 @@ void ASTWriter::WriteASTCore(Sema &SemaRef, // Write the undefined internal functions and variables, and inline functions. if (!UndefinedButUsed.empty()) Stream.EmitRecord(UNDEFINED_BUT_USED, UndefinedButUsed); - + + if (!DeleteExprsToAnalyze.empty()) + Stream.EmitRecord(DELETE_EXPRS_TO_ANALYZE, DeleteExprsToAnalyze); + // Write the visible updates to DeclContexts. for (auto *DC : UpdatedDeclContexts) WriteDeclContextVisibleUpdate(DC); @@ -4720,7 +4479,7 @@ void ASTWriter::WriteASTCore(Sema &SemaRef, // FIXME: If the module has macros imported then later has declarations // imported, this location won't be the right one as a location for the // declaration imports. - AddSourceLocation(Import.M->MacroVisibilityLoc, ImportedModules); + AddSourceLocation(PP.getModuleImportLoc(Import.M), ImportedModules); } Stream.EmitRecord(IMPORTED_MODULES, ImportedModules); @@ -4729,7 +4488,6 @@ void ASTWriter::WriteASTCore(Sema &SemaRef, WriteDeclReplacementsBlock(); WriteRedeclarations(); - WriteMergedDecls(); WriteObjCCategories(); WriteLateParsedTemplates(SemaRef); if(!WritingModule) @@ -4784,7 +4542,7 @@ void ASTWriter::WriteDeclUpdatesBlocks(RecordDataImpl &OffsetsRecord) { case UPD_CXX_INSTANTIATED_CLASS_DEFINITION: { auto *RD = cast<CXXRecordDecl>(D); - AddUpdatedDeclContext(RD->getPrimaryContext()); + UpdatedDeclContexts.insert(RD->getPrimaryContext()); AddCXXDefinitionData(RD, Record); Record.push_back(WriteDeclContextLexicalBlock( *Context, const_cast<CXXRecordDecl *>(RD))); @@ -4828,6 +4586,10 @@ void ASTWriter::WriteDeclUpdatesBlocks(RecordDataImpl &OffsetsRecord) { break; } + case UPD_CXX_RESOLVED_DTOR_DELETE: + AddDeclRef(Update.getDecl(), Record); + break; + case UPD_CXX_RESOLVED_EXCEPTION_SPEC: addExceptionSpec( *this, @@ -4846,10 +4608,15 @@ void ASTWriter::WriteDeclUpdatesBlocks(RecordDataImpl &OffsetsRecord) { case UPD_STATIC_LOCAL_NUMBER: Record.push_back(Update.getNumber()); break; + case UPD_DECL_MARKED_OPENMP_THREADPRIVATE: AddSourceRange(D->getAttr<OMPThreadPrivateDeclAttr>()->getRange(), Record); break; + + case UPD_DECL_EXPORTED: + Record.push_back(getSubmoduleID(Update.getModule())); + break; } } @@ -4859,8 +4626,6 @@ void ASTWriter::WriteDeclUpdatesBlocks(RecordDataImpl &OffsetsRecord) { Record.push_back(Def->isInlined()); AddSourceLocation(Def->getInnerLocStart(), Record); AddFunctionDefinition(Def, Record); - if (auto *DD = dyn_cast<CXXDestructorDecl>(Def)) - Record.push_back(GetDeclRef(DD->getOperatorDelete())); } OffsetsRecord.push_back(GetDeclRef(D)); @@ -4868,11 +4633,7 @@ void ASTWriter::WriteDeclUpdatesBlocks(RecordDataImpl &OffsetsRecord) { Stream.EmitRecord(DECL_UPDATES, Record); - // Flush any statements that were written as part of this update record. - FlushStmts(); - - // Flush C++ base specifiers, if there are any. 
- FlushCXXBaseSpecifiers(); + FlushPendingAfterDecl(); } } @@ -4953,8 +4714,7 @@ MacroID ASTWriter::getMacroID(MacroInfo *MI) { } uint64_t ASTWriter::getMacroDirectivesOffset(const IdentifierInfo *Name) { - assert(IdentMacroDirectivesOffsetMap[Name] && "not set!"); - return IdentMacroDirectivesOffsetMap[Name]; + return IdentMacroDirectivesOffsetMap.lookup(Name); } void ASTWriter::AddSelectorRef(const Selector SelRef, RecordDataImpl &Record) { @@ -4984,8 +4744,16 @@ void ASTWriter::AddCXXTemporary(const CXXTemporary *Temp, RecordDataImpl &Record AddDeclRef(Temp->getDestructor(), Record); } +void ASTWriter::AddCXXCtorInitializersRef(ArrayRef<CXXCtorInitializer *> Inits, + RecordDataImpl &Record) { + assert(!Inits.empty() && "Empty ctor initializer sets are not recorded"); + CXXCtorInitializersToWrite.push_back( + QueuedCXXCtorInitializers(NextCXXCtorInitializersID, Inits)); + Record.push_back(NextCXXCtorInitializersID++); +} + void ASTWriter::AddCXXBaseSpecifiersRef(CXXBaseSpecifier const *Bases, - CXXBaseSpecifier const *BasesEnd, + CXXBaseSpecifier const *BasesEnd, RecordDataImpl &Record) { assert(Bases != BasesEnd && "Empty base-specifier sets are not recorded"); CXXBaseSpecifiersToWrite.push_back( @@ -5060,46 +4828,40 @@ void ASTWriter::AddTypeRef(QualType T, RecordDataImpl &Record) { Record.push_back(GetOrCreateTypeID(T)); } -TypeID ASTWriter::GetOrCreateTypeID( QualType T) { - assert(Context); - return MakeTypeID(*Context, T, - std::bind1st(std::mem_fun(&ASTWriter::GetOrCreateTypeIdx), this)); -} - -TypeID ASTWriter::getTypeID(QualType T) const { +TypeID ASTWriter::GetOrCreateTypeID(QualType T) { assert(Context); - return MakeTypeID(*Context, T, - std::bind1st(std::mem_fun(&ASTWriter::getTypeIdx), this)); -} + return MakeTypeID(*Context, T, [&](QualType T) -> TypeIdx { + if (T.isNull()) + return TypeIdx(); + assert(!T.getLocalFastQualifiers()); -TypeIdx ASTWriter::GetOrCreateTypeIdx(QualType T) { - if (T.isNull()) - return TypeIdx(); - assert(!T.getLocalFastQualifiers()); + TypeIdx &Idx = TypeIdxs[T]; + if (Idx.getIndex() == 0) { + if (DoneWritingDeclsAndTypes) { + assert(0 && "New type seen after serializing all the types to emit!"); + return TypeIdx(); + } - TypeIdx &Idx = TypeIdxs[T]; - if (Idx.getIndex() == 0) { - if (DoneWritingDeclsAndTypes) { - assert(0 && "New type seen after serializing all the types to emit!"); - return TypeIdx(); + // We haven't seen this type before. Assign it a new ID and put it + // into the queue of types to emit. + Idx = TypeIdx(NextTypeID++); + DeclTypesToEmit.push(T); } - - // We haven't seen this type before. Assign it a new ID and put it - // into the queue of types to emit. - Idx = TypeIdx(NextTypeID++); - DeclTypesToEmit.push(T); - } - return Idx; + return Idx; + }); } -TypeIdx ASTWriter::getTypeIdx(QualType T) const { - if (T.isNull()) - return TypeIdx(); - assert(!T.getLocalFastQualifiers()); +TypeID ASTWriter::getTypeID(QualType T) const { + assert(Context); + return MakeTypeID(*Context, T, [&](QualType T) -> TypeIdx { + if (T.isNull()) + return TypeIdx(); + assert(!T.getLocalFastQualifiers()); - TypeIdxMap::const_iterator I = TypeIdxs.find(T); - assert(I != TypeIdxs.end() && "Type not emitted!"); - return I->second; + TypeIdxMap::const_iterator I = TypeIdxs.find(T); + assert(I != TypeIdxs.end() && "Type not emitted!"); + return I->second; + }); } void ASTWriter::AddDeclRef(const Decl *D, RecordDataImpl &Record) { @@ -5234,13 +4996,10 @@ unsigned ASTWriter::getAnonymousDeclarationNumber(const NamedDecl *D) { // already done so. 
auto It = AnonymousDeclarationNumbers.find(D); if (It == AnonymousDeclarationNumbers.end()) { - unsigned Index = 0; - for (Decl *LexicalD : D->getLexicalDeclContext()->decls()) { - auto *ND = dyn_cast<NamedDecl>(LexicalD); - if (!ND || !needsAnonymousDeclarationNumber(ND)) - continue; - AnonymousDeclarationNumbers[ND] = Index++; - } + auto *DC = D->getLexicalDeclContext(); + numberAnonymousDeclsWithin(DC, [&](const NamedDecl *ND, unsigned Number) { + AnonymousDeclarationNumbers[ND] = Number; + }); It = AnonymousDeclarationNumbers.find(D); assert(It != AnonymousDeclarationNumbers.end() && @@ -5555,7 +5314,8 @@ void ASTWriter::AddCXXBaseSpecifier(const CXXBaseSpecifier &Base, void ASTWriter::FlushCXXBaseSpecifiers() { RecordData Record; - for (unsigned I = 0, N = CXXBaseSpecifiersToWrite.size(); I != N; ++I) { + unsigned N = CXXBaseSpecifiersToWrite.size(); + for (unsigned I = 0; I != N; ++I) { Record.clear(); // Record the offset of this base-specifier set. @@ -5579,6 +5339,8 @@ void ASTWriter::FlushCXXBaseSpecifiers() { FlushStmts(); } + assert(N == CXXBaseSpecifiersToWrite.size() && + "added more base specifiers while writing base specifiers"); CXXBaseSpecifiersToWrite.clear(); } @@ -5620,6 +5382,36 @@ void ASTWriter::AddCXXCtorInitializers( } } +void ASTWriter::FlushCXXCtorInitializers() { + RecordData Record; + + unsigned N = CXXCtorInitializersToWrite.size(); + (void)N; // Silence unused warning in non-assert builds. + for (auto &Init : CXXCtorInitializersToWrite) { + Record.clear(); + + // Record the offset of this mem-initializer list. + unsigned Index = Init.ID - 1; + if (Index == CXXCtorInitializersOffsets.size()) + CXXCtorInitializersOffsets.push_back(Stream.GetCurrentBitNo()); + else { + if (Index > CXXCtorInitializersOffsets.size()) + CXXCtorInitializersOffsets.resize(Index + 1); + CXXCtorInitializersOffsets[Index] = Stream.GetCurrentBitNo(); + } + + AddCXXCtorInitializers(Init.Inits.data(), Init.Inits.size(), Record); + Stream.EmitRecord(serialization::DECL_CXX_CTOR_INITIALIZERS, Record); + + // Flush any expressions that were written as part of the initializers. + FlushStmts(); + } + + assert(N == CXXCtorInitializersToWrite.size() && + "added more ctor initializers while writing ctor initializers"); + CXXCtorInitializersToWrite.clear(); +} + void ASTWriter::AddCXXDefinitionData(const CXXRecordDecl *D, RecordDataImpl &Record) { auto &Data = D->data(); Record.push_back(Data.IsLambda); @@ -5725,6 +5517,8 @@ void ASTWriter::ReaderInitialized(ASTReader *Reader) { Chain = Reader; + // Note, this will get called multiple times, once when the reader starts up + // and again each time it's done reading a PCH or module.
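Since this callback fires repeatedly, the base IDs below cannot be computed once and cached; they must be re-derived from the chain's running totals on every call. A minimal sketch of that rebasing, assuming a hypothetical Totals struct in place of the chained reader and placeholder values for the NUM_PREDEF_* constants:

#include <cstdint>

// Hypothetical stand-in for the chained reader's running totals.
struct Totals { uint32_t Decls, Types, Idents; };

// Placeholder values; the real NUM_PREDEF_* constants are fixed by the
// serialization format.
constexpr uint32_t NUM_PREDEF_DECL_IDS = 1;
constexpr uint32_t NUM_PREDEF_TYPE_IDS = 1;
constexpr uint32_t NUM_PREDEF_IDENT_IDS = 1;

struct IDBases { uint32_t FirstDeclID, FirstTypeID, FirstIdentID; };

// Local IDs start past the predefined IDs plus everything the chain has
// already loaded; recomputed on each ReaderInitialized callback.
IDBases rebase(const Totals &T) {
  return {NUM_PREDEF_DECL_IDS + T.Decls, NUM_PREDEF_TYPE_IDS + T.Types,
          NUM_PREDEF_IDENT_IDS + T.Idents};
}

The assignments that follow implement exactly this bookkeeping.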
FirstDeclID = NUM_PREDEF_DECL_IDS + Chain->getTotalNumDecls(); FirstTypeID = NUM_PREDEF_TYPE_IDS + Chain->getTotalNumTypes(); FirstIdentID = NUM_PREDEF_IDENT_IDS + Chain->getTotalNumIdentifiers(); @@ -5772,7 +5566,7 @@ void ASTWriter::SelectorRead(SelectorID ID, Selector S) { } void ASTWriter::MacroDefinitionRead(serialization::PreprocessedEntityID ID, - MacroDefinition *MD) { + MacroDefinitionRecord *MD) { assert(MacroDefinitions.find(MD) == MacroDefinitions.end()); MacroDefinitions[MD] = ID; } @@ -5809,7 +5603,7 @@ void ASTWriter::AddedVisibleDecl(const DeclContext *DC, const Decl *D) { assert(!getDefinitiveDeclContext(DC) && "DeclContext not definitive!"); assert(!WritingAST && "Already writing the AST!"); - AddUpdatedDeclContext(DC); + UpdatedDeclContexts.insert(DC); UpdatingVisibleDecls.push_back(D); } @@ -5863,21 +5657,36 @@ void ASTWriter::AddedCXXTemplateSpecialization(const FunctionTemplateDecl *TD, } void ASTWriter::ResolvedExceptionSpec(const FunctionDecl *FD) { - assert(!WritingAST && "Already writing the AST!"); - FD = FD->getCanonicalDecl(); - if (!FD->isFromASTFile()) - return; // Not a function declared in PCH and defined outside. - - DeclUpdates[FD].push_back(UPD_CXX_RESOLVED_EXCEPTION_SPEC); + assert(!DoneWritingDeclsAndTypes && "Already done writing updates!"); + if (!Chain) return; + Chain->forEachFormerlyCanonicalImportedDecl(FD, [&](const Decl *D) { + // If we don't already know the exception specification for this redecl + // chain, add an update record for it. + if (isUnresolvedExceptionSpec(cast<FunctionDecl>(D) + ->getType() + ->castAs<FunctionProtoType>() + ->getExceptionSpecType())) + DeclUpdates[D].push_back(UPD_CXX_RESOLVED_EXCEPTION_SPEC); + }); } void ASTWriter::DeducedReturnType(const FunctionDecl *FD, QualType ReturnType) { assert(!WritingAST && "Already writing the AST!"); - FD = FD->getCanonicalDecl(); - if (!FD->isFromASTFile()) - return; // Not a function declared in PCH and defined outside. 
+ if (!Chain) return; + Chain->forEachFormerlyCanonicalImportedDecl(FD, [&](const Decl *D) { + DeclUpdates[D].push_back( + DeclUpdate(UPD_CXX_DEDUCED_RETURN_TYPE, ReturnType)); + }); +} - DeclUpdates[FD].push_back(DeclUpdate(UPD_CXX_DEDUCED_RETURN_TYPE, ReturnType)); +void ASTWriter::ResolvedOperatorDelete(const CXXDestructorDecl *DD, + const FunctionDecl *Delete) { + assert(!WritingAST && "Already writing the AST!"); + assert(Delete && "Not given an operator delete"); + if (!Chain) return; + Chain->forEachFormerlyCanonicalImportedDecl(DD, [&](const Decl *D) { + DeclUpdates[D].push_back(DeclUpdate(UPD_CXX_RESOLVED_DTOR_DELETE, Delete)); + }); } void ASTWriter::CompletedImplicitDefinition(const FunctionDecl *D) { @@ -5894,8 +5703,7 @@ void ASTWriter::FunctionDefinitionInstantiated(const FunctionDecl *D) { if (!D->isFromASTFile()) return; - DeclUpdates[D].push_back( - DeclUpdate(UPD_CXX_ADDED_FUNCTION_DEFINITION)); + DeclUpdates[D].push_back(DeclUpdate(UPD_CXX_ADDED_FUNCTION_DEFINITION)); } void ASTWriter::StaticDataMemberInstantiated(const VarDecl *D) { @@ -5951,3 +5759,12 @@ void ASTWriter::DeclarationMarkedOpenMPThreadPrivate(const Decl *D) { DeclUpdates[D].push_back(DeclUpdate(UPD_DECL_MARKED_OPENMP_THREADPRIVATE)); } + +void ASTWriter::RedefinedHiddenDefinition(const NamedDecl *D, Module *M) { + assert(!WritingAST && "Already writing the AST!"); + assert(D->isHidden() && "expected a hidden declaration"); + if (!D->isFromASTFile()) + return; + + DeclUpdates[D].push_back(DeclUpdate(UPD_DECL_EXPORTED, M)); +} diff --git a/contrib/llvm/tools/clang/lib/Serialization/ASTWriterDecl.cpp b/contrib/llvm/tools/clang/lib/Serialization/ASTWriterDecl.cpp index c97c2d8..0fa4f93 100644 --- a/contrib/llvm/tools/clang/lib/Serialization/ASTWriterDecl.cpp +++ b/contrib/llvm/tools/clang/lib/Serialization/ASTWriterDecl.cpp @@ -133,11 +133,66 @@ namespace clang { void AddFunctionDefinition(const FunctionDecl *FD) { assert(FD->doesThisDeclarationHaveABody()); - if (auto *CD = dyn_cast<CXXConstructorDecl>(FD)) - Writer.AddCXXCtorInitializers(CD->CtorInitializers, - CD->NumCtorInitializers, Record); + if (auto *CD = dyn_cast<CXXConstructorDecl>(FD)) { + Record.push_back(CD->NumCtorInitializers); + if (CD->NumCtorInitializers) + Writer.AddCXXCtorInitializersRef( + llvm::makeArrayRef(CD->init_begin(), CD->init_end()), Record); + } Writer.AddStmt(FD->getBody()); } + + /// Get the specialization decl from an entry in the specialization list. + template <typename EntryType> + typename RedeclarableTemplateDecl::SpecEntryTraits<EntryType>::DeclType * + getSpecializationDecl(EntryType &T) { + return RedeclarableTemplateDecl::SpecEntryTraits<EntryType>::getDecl(&T); + } + + /// Get the list of partial specializations from a template's common ptr. + template<typename T> + decltype(T::PartialSpecializations) &getPartialSpecializations(T *Common) { + return Common->PartialSpecializations; + } + ArrayRef<Decl> getPartialSpecializations(FunctionTemplateDecl::Common *) { + return None; + } + + template<typename Decl> + void AddTemplateSpecializations(Decl *D) { + auto *Common = D->getCommonPtr(); + + // If we have any lazy specializations, and the external AST source is + // our chained AST reader, we can just write out the DeclIDs. Otherwise, + // we need to resolve them to actual declarations. 
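The LazySpecializations pointer consumed in the next hunk uses a length-prefixed layout: element 0 holds the count and the DeclIDs follow, which is what the ArrayRef construction below relies on. A minimal sketch of reading that layout, assuming only that DeclID is an integral ID type:

#include <cstdint>
#include <vector>

using DeclID = uint32_t;

// LS[0] is the number of IDs; LS[1] .. LS[LS[0]] are the IDs themselves,
// mirroring ArrayRef<DeclID>(LS + 1, LS + 1 + LS[0]) below.
std::vector<DeclID> readLengthPrefixed(const DeclID *LS) {
  if (!LS)
    return {};
  return std::vector<DeclID>(LS + 1, LS + 1 + LS[0]);
}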
+ if (Writer.Chain != Writer.Context->getExternalSource() && + Common->LazySpecializations) { + D->LoadLazySpecializations(); + assert(!Common->LazySpecializations); + } + + auto &Specializations = Common->Specializations; + auto &&PartialSpecializations = getPartialSpecializations(Common); + ArrayRef<DeclID> LazySpecializations; + if (auto *LS = Common->LazySpecializations) + LazySpecializations = ArrayRef<DeclID>(LS + 1, LS + 1 + LS[0]); + + Record.push_back(Specializations.size() + + PartialSpecializations.size() + + LazySpecializations.size()); + for (auto &Entry : Specializations) { + auto *D = getSpecializationDecl(Entry); + assert(D->isCanonicalDecl() && "non-canonical decl in set"); + Writer.AddDeclRef(D, Record); + } + for (auto &Entry : PartialSpecializations) { + auto *D = getSpecializationDecl(Entry); + assert(D->isCanonicalDecl() && "non-canonical decl in set"); + Writer.AddDeclRef(D, Record); + } + for (DeclID ID : LazySpecializations) + Record.push_back(ID); + } }; } @@ -157,7 +212,7 @@ void ASTDeclWriter::Visit(Decl *D) { if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { Record.push_back(FD->doesThisDeclarationHaveABody()); if (FD->doesThisDeclarationHaveABody()) - Writer.AddStmt(FD->getBody()); + AddFunctionDefinition(FD); } } @@ -188,7 +243,7 @@ void ASTDeclWriter::VisitDecl(Decl *D) { while (auto *NS = dyn_cast<NamespaceDecl>(DC->getRedeclContext())) { if (!NS->isFromASTFile()) break; - Writer.AddUpdatedDeclContext(NS->getPrimaryContext()); + Writer.UpdatedDeclContexts.insert(NS->getPrimaryContext()); if (!NS->isInlineNamespace()) break; DC = NS->getParent(); @@ -203,8 +258,9 @@ void ASTDeclWriter::VisitTranslationUnitDecl(TranslationUnitDecl *D) { void ASTDeclWriter::VisitNamedDecl(NamedDecl *D) { VisitDecl(D); Writer.AddDeclarationName(D->getDeclName(), Record); - if (needsAnonymousDeclarationNumber(D)) - Record.push_back(Writer.getAnonymousDeclarationNumber(D)); + Record.push_back(needsAnonymousDeclarationNumber(D) + ? 
Writer.getAnonymousDeclarationNumber(D) + : 0); } void ASTDeclWriter::VisitTypeDecl(TypeDecl *D) { @@ -658,8 +714,10 @@ void ASTDeclWriter::VisitObjCImplementationDecl(ObjCImplementationDecl *D) { Writer.AddSourceLocation(D->getIvarRBraceLoc(), Record); Record.push_back(D->hasNonZeroConstructors()); Record.push_back(D->hasDestructors()); - Writer.AddCXXCtorInitializers(D->IvarInitializers, D->NumIvarInitializers, - Record); + Record.push_back(D->NumIvarInitializers); + if (D->NumIvarInitializers) + Writer.AddCXXCtorInitializersRef( + llvm::makeArrayRef(D->init_begin(), D->init_end()), Record); Code = serialization::DECL_OBJC_IMPLEMENTATION; } @@ -732,13 +790,15 @@ void ASTDeclWriter::VisitVarDecl(VarDecl *D) { Record.push_back(D->getStorageClass()); Record.push_back(D->getTSCSpec()); Record.push_back(D->getInitStyle()); - Record.push_back(D->isExceptionVariable()); - Record.push_back(D->isNRVOVariable()); - Record.push_back(D->isCXXForRangeDecl()); - Record.push_back(D->isARCPseudoStrong()); - Record.push_back(D->isConstexpr()); - Record.push_back(D->isInitCapture()); - Record.push_back(D->isPreviousDeclInSameBlockScope()); + if (!isa<ParmVarDecl>(D)) { + Record.push_back(D->isExceptionVariable()); + Record.push_back(D->isNRVOVariable()); + Record.push_back(D->isCXXForRangeDecl()); + Record.push_back(D->isARCPseudoStrong()); + Record.push_back(D->isConstexpr()); + Record.push_back(D->isInitCapture()); + Record.push_back(D->isPreviousDeclInSameBlockScope()); + } Record.push_back(D->getLinkageInternal()); if (D->getInit()) { @@ -920,17 +980,34 @@ void ASTDeclWriter::VisitNamespaceDecl(NamespaceDecl *D) { if (Writer.hasChain() && !D->isOriginalNamespace() && D->getOriginalNamespace()->isFromASTFile()) { NamespaceDecl *NS = D->getOriginalNamespace(); - Writer.AddUpdatedDeclContext(NS); - - // Make sure all visible decls are written. They will be recorded later. - if (StoredDeclsMap *Map = NS->buildLookup()) { - for (StoredDeclsMap::iterator D = Map->begin(), DEnd = Map->end(); - D != DEnd; ++D) { - DeclContext::lookup_result R = D->second.getLookupResult(); - for (DeclContext::lookup_iterator I = R.begin(), E = R.end(); I != E; - ++I) - Writer.GetDeclRef(*I); + Writer.UpdatedDeclContexts.insert(NS); + + // Make sure all visible decls are written. They will be recorded later. We + // do this using a side data structure so we can sort the names into + // a deterministic order. + StoredDeclsMap *Map = NS->buildLookup(); + SmallVector<std::pair<DeclarationName, DeclContext::lookup_result>, 16> + LookupResults; + LookupResults.reserve(Map->size()); + for (auto &Entry : *Map) + LookupResults.push_back( + std::make_pair(Entry.first, Entry.second.getLookupResult())); + + std::sort(LookupResults.begin(), LookupResults.end(), llvm::less_first()); + for (auto &NameAndResult : LookupResults) { + DeclarationName Name = NameAndResult.first; + DeclContext::lookup_result Result = NameAndResult.second; + if (Name.getNameKind() == DeclarationName::CXXConstructorName || + Name.getNameKind() == DeclarationName::CXXConversionFunctionName) { + // We have to work around a name lookup bug here where negative lookup + // results for these names get cached in namespace lookup tables. 
+ assert(Result.empty() && "Cannot have a constructor or conversion " + "function name in a namespace!"); + continue; } + + for (NamedDecl *ND : Result) + Writer.GetDeclRef(ND); } } @@ -1067,8 +1144,6 @@ void ASTDeclWriter::VisitCXXConstructorDecl(CXXConstructorDecl *D) { Writer.AddDeclRef(D->getInheritedConstructor(), Record); Record.push_back(D->IsExplicitSpecified); - Writer.AddCXXCtorInitializers(D->CtorInitializers, D->NumCtorInitializers, - Record); Code = serialization::DECL_CXX_CONSTRUCTOR; } @@ -1076,7 +1151,7 @@ void ASTDeclWriter::VisitCXXConstructorDecl(CXXConstructorDecl *D) { void ASTDeclWriter::VisitCXXDestructorDecl(CXXDestructorDecl *D) { VisitCXXMethodDecl(D); - Writer.AddDeclRef(D->OperatorDelete, Record); + Writer.AddDeclRef(D->getOperatorDelete(), Record); Code = serialization::DECL_CXX_DESTRUCTOR; } @@ -1171,24 +1246,8 @@ void ASTDeclWriter::VisitRedeclarableTemplateDecl(RedeclarableTemplateDecl *D) { void ASTDeclWriter::VisitClassTemplateDecl(ClassTemplateDecl *D) { VisitRedeclarableTemplateDecl(D); - if (D->isFirstDecl()) { - typedef llvm::FoldingSetVector<ClassTemplateSpecializationDecl> CTSDSetTy; - CTSDSetTy &CTSDSet = D->getSpecializations(); - Record.push_back(CTSDSet.size()); - for (CTSDSetTy::iterator I=CTSDSet.begin(), E = CTSDSet.end(); I!=E; ++I) { - assert(I->isCanonicalDecl() && "Expected only canonical decls in set"); - Writer.AddDeclRef(&*I, Record); - } - - typedef llvm::FoldingSetVector<ClassTemplatePartialSpecializationDecl> - CTPSDSetTy; - CTPSDSetTy &CTPSDSet = D->getPartialSpecializations(); - Record.push_back(CTPSDSet.size()); - for (CTPSDSetTy::iterator I=CTPSDSet.begin(), E=CTPSDSet.end(); I!=E; ++I) { - assert(I->isCanonicalDecl() && "Expected only canonical decls in set"); - Writer.AddDeclRef(&*I, Record); - } - } + if (D->isFirstDecl()) + AddTemplateSpecializations(D); Code = serialization::DECL_CLASS_TEMPLATE; } @@ -1246,26 +1305,8 @@ void ASTDeclWriter::VisitClassTemplatePartialSpecializationDecl( void ASTDeclWriter::VisitVarTemplateDecl(VarTemplateDecl *D) { VisitRedeclarableTemplateDecl(D); - if (D->isFirstDecl()) { - typedef llvm::FoldingSetVector<VarTemplateSpecializationDecl> VTSDSetTy; - VTSDSetTy &VTSDSet = D->getSpecializations(); - Record.push_back(VTSDSet.size()); - for (VTSDSetTy::iterator I = VTSDSet.begin(), E = VTSDSet.end(); I != E; - ++I) { - assert(I->isCanonicalDecl() && "Expected only canonical decls in set"); - Writer.AddDeclRef(&*I, Record); - } - - typedef llvm::FoldingSetVector<VarTemplatePartialSpecializationDecl> - VTPSDSetTy; - VTPSDSetTy &VTPSDSet = D->getPartialSpecializations(); - Record.push_back(VTPSDSet.size()); - for (VTPSDSetTy::iterator I = VTPSDSet.begin(), E = VTPSDSet.end(); I != E; - ++I) { - assert(I->isCanonicalDecl() && "Expected only canonical decls in set"); - Writer.AddDeclRef(&*I, Record); - } - } + if (D->isFirstDecl()) + AddTemplateSpecializations(D); Code = serialization::DECL_VAR_TEMPLATE; } @@ -1330,19 +1371,8 @@ void ASTDeclWriter::VisitClassScopeFunctionSpecializationDecl( void ASTDeclWriter::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) { VisitRedeclarableTemplateDecl(D); - if (D->isFirstDecl()) { - // This FunctionTemplateDecl owns the CommonPtr; write it. - - // Write the function specialization declarations. 
- Record.push_back(D->getSpecializations().size()); - for (llvm::FoldingSetVector<FunctionTemplateSpecializationInfo>::iterator - I = D->getSpecializations().begin(), - E = D->getSpecializations().end() ; I != E; ++I) { - assert(I->Function->isCanonicalDecl() && - "Expected only canonical decls in set"); - Writer.AddDeclRef(I->Function, Record); - } - } + if (D->isFirstDecl()) + AddTemplateSpecializations(D); Code = serialization::DECL_FUNCTION_TEMPLATE; } @@ -1448,25 +1478,59 @@ void ASTDeclWriter::VisitDeclContext(DeclContext *DC, uint64_t LexicalOffset, template <typename T> void ASTDeclWriter::VisitRedeclarable(Redeclarable<T> *D) { T *First = D->getFirstDecl(); - if (First->getMostRecentDecl() != First) { + T *MostRecent = First->getMostRecentDecl(); + if (MostRecent != First) { assert(isRedeclarableDeclKind(static_cast<T *>(D)->getKind()) && "Not considered redeclarable?"); - + // There is more than one declaration of this entity, so we will need to // write a redeclaration chain. Writer.AddDeclRef(First, Record); Writer.Redeclarations.insert(First); + auto *Previous = D->getPreviousDecl(); + + // In a modules build, we can have imported declarations after a local + // canonical declaration. If this is the first local declaration, emit + // a list of all such imported declarations so that we can ensure they + // are loaded before we are. This allows us to rebuild the redecl chain + // in the right order on reload (all declarations imported by a module + // should be before all declarations provided by that module). + bool EmitImportedMergedCanonicalDecls = false; + if (Context.getLangOpts().Modules && Writer.Chain) { + auto *PreviousLocal = Previous; + while (PreviousLocal && PreviousLocal->isFromASTFile()) + PreviousLocal = PreviousLocal->getPreviousDecl(); + if (!PreviousLocal) + EmitImportedMergedCanonicalDecls = true; + } + if (EmitImportedMergedCanonicalDecls) { + llvm::SmallMapVector<ModuleFile*, Decl*, 16> FirstInModule; + for (auto *Redecl = MostRecent; Redecl; + Redecl = Redecl->getPreviousDecl()) + if (Redecl->isFromASTFile()) + FirstInModule[Writer.Chain->getOwningModuleFile(Redecl)] = Redecl; + // FIXME: If FirstInModule has entries for modules A and B, and B imports + // A (directly or indirectly), we don't need to write the entry for A. + Record.push_back(FirstInModule.size()); + for (auto I = FirstInModule.rbegin(), E = FirstInModule.rend(); + I != E; ++I) + Writer.AddDeclRef(I->second, Record); + } else + Record.push_back(0); + // Make sure that we serialize both the previous and the most-recent // declarations, which (transitively) ensures that all declarations in the // chain get serialized. - (void)Writer.GetDeclRef(D->getPreviousDecl()); - (void)Writer.GetDeclRef(First->getMostRecentDecl()); + // + // FIXME: This is not correct; when we reach an imported declaration we + // won't emit its previous declaration. + (void)Writer.GetDeclRef(Previous); + (void)Writer.GetDeclRef(MostRecent); } else { // We use the sentinel value 0 to indicate an only declaration. 
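In other words, each redeclarable entity is now recorded in one of two shapes: a lone sentinel 0 for an only declaration, or a reference to the first declaration followed by a count (possibly 0) of imported first-redeclarations and their references. A rough sketch of the two shapes over a hypothetical raw-integer record; a reader can tell them apart because a real declaration reference is never 0:

#include <cstdint>
#include <vector>

using RecordData = std::vector<uint64_t>; // hypothetical raw record

// Shape 1: only declaration -> just the sentinel 0.
void emitOnlyDecl(RecordData &R) { R.push_back(0); }

// Shape 2: redeclaration chain -> first-decl reference, then a count of
// imported first-redeclarations (0 when none apply), then those references.
void emitChain(RecordData &R, uint64_t FirstDeclRef,
               const std::vector<uint64_t> &ImportedFirstRefs) {
  R.push_back(FirstDeclRef);
  R.push_back(ImportedFirstRefs.size());
  for (uint64_t Ref : ImportedFirstRefs)
    R.push_back(Ref);
}

The sentinel write for the single-declaration case follows.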
Record.push_back(0); } - } void ASTDeclWriter::VisitOMPThreadPrivateDecl(OMPThreadPrivateDecl *D) { @@ -1504,6 +1568,7 @@ void ASTWriter::WriteDeclAbbrevs() { // NamedDecl Abv->Add(BitCodeAbbrevOp(0)); // NameKind = Identifier Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Name + Abv->Add(BitCodeAbbrevOp(0)); // AnonDeclNumber // ValueDecl Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type // DeclaratorDecl @@ -1536,6 +1601,7 @@ void ASTWriter::WriteDeclAbbrevs() { // NamedDecl Abv->Add(BitCodeAbbrevOp(0)); // NameKind = Identifier Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Name + Abv->Add(BitCodeAbbrevOp(0)); // AnonDeclNumber // ValueDecl Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type // DeclaratorDecl @@ -1573,6 +1639,7 @@ void ASTWriter::WriteDeclAbbrevs() { // NamedDecl Abv->Add(BitCodeAbbrevOp(0)); // NameKind = Identifier Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Name + Abv->Add(BitCodeAbbrevOp(0)); // AnonDeclNumber // TypeDecl Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Source Location Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type Ref @@ -1620,6 +1687,7 @@ void ASTWriter::WriteDeclAbbrevs() { // NamedDecl Abv->Add(BitCodeAbbrevOp(0)); // NameKind = Identifier Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Name + Abv->Add(BitCodeAbbrevOp(0)); // AnonDeclNumber // TypeDecl Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Source Location Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type Ref @@ -1662,6 +1730,7 @@ void ASTWriter::WriteDeclAbbrevs() { // NamedDecl Abv->Add(BitCodeAbbrevOp(0)); // NameKind = Identifier Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Name + Abv->Add(BitCodeAbbrevOp(0)); // AnonDeclNumber // ValueDecl Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type // DeclaratorDecl @@ -1671,13 +1740,6 @@ void ASTWriter::WriteDeclAbbrevs() { Abv->Add(BitCodeAbbrevOp(0)); // StorageClass Abv->Add(BitCodeAbbrevOp(0)); // getTSCSpec Abv->Add(BitCodeAbbrevOp(0)); // hasCXXDirectInitializer - Abv->Add(BitCodeAbbrevOp(0)); // isExceptionVariable - Abv->Add(BitCodeAbbrevOp(0)); // isNRVOVariable - Abv->Add(BitCodeAbbrevOp(0)); // isCXXForRangeDecl - Abv->Add(BitCodeAbbrevOp(0)); // isARCPseudoStrong - Abv->Add(BitCodeAbbrevOp(0)); // isConstexpr - Abv->Add(BitCodeAbbrevOp(0)); // isInitCapture - Abv->Add(BitCodeAbbrevOp(0)); // isPrevDeclInSameScope Abv->Add(BitCodeAbbrevOp(0)); // Linkage Abv->Add(BitCodeAbbrevOp(0)); // HasInit Abv->Add(BitCodeAbbrevOp(0)); // HasMemberSpecializationInfo @@ -1715,6 +1777,7 @@ void ASTWriter::WriteDeclAbbrevs() { // NamedDecl Abv->Add(BitCodeAbbrevOp(0)); // NameKind = Identifier Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Name + Abv->Add(BitCodeAbbrevOp(0)); // AnonDeclNumber // TypeDecl Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Source Location Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type Ref @@ -1743,6 +1806,7 @@ void ASTWriter::WriteDeclAbbrevs() { // NamedDecl Abv->Add(BitCodeAbbrevOp(0)); // NameKind = Identifier Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Name + Abv->Add(BitCodeAbbrevOp(0)); // AnonDeclNumber // ValueDecl Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type // DeclaratorDecl @@ -1788,6 +1852,7 @@ void ASTWriter::WriteDeclAbbrevs() { // NamedDecl Abv->Add(BitCodeAbbrevOp(DeclarationName::Identifier)); // NameKind Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Identifier + Abv->Add(BitCodeAbbrevOp(0)); // AnonDeclNumber // ValueDecl 
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type // DeclaratorDecl @@ -1948,9 +2013,10 @@ void ASTWriter::WriteDecl(ASTContext &Context, Decl *D) { // Determine the ID for this declaration. serialization::DeclID ID; - if (D->isFromASTFile()) + if (D->isFromASTFile()) { + assert(isRewritten(D) && "should not be emitting imported decl"); ID = getDeclID(D); - else { + } else { serialization::DeclID &IDR = DeclIDs[D]; if (IDR == 0) IDR = NextDeclID++; @@ -2015,12 +2081,10 @@ void ASTWriter::WriteDecl(ASTContext &Context, Decl *D) { D->getDeclKindName() + "'"); Stream.EmitRecord(W.Code, Record, W.AbbrevToUse); - // Flush any expressions that were written as part of this declaration. - FlushStmts(); - - // Flush C++ base specifiers, if there are any. - FlushCXXBaseSpecifiers(); - + // Flush any expressions, base specifiers, and ctor initializers that + // were written as part of this declaration. + FlushPendingAfterDecl(); + // Note declarations that should be deserialized eagerly so that we can add // them to a record in the AST file later. if (isRequiredDecl(D, Context)) diff --git a/contrib/llvm/tools/clang/lib/Serialization/ASTWriterStmt.cpp b/contrib/llvm/tools/clang/lib/Serialization/ASTWriterStmt.cpp index e980ce7..ec822f0 100644 --- a/contrib/llvm/tools/clang/lib/Serialization/ASTWriterStmt.cpp +++ b/contrib/llvm/tools/clang/lib/Serialization/ASTWriterStmt.cpp @@ -553,6 +553,7 @@ void ASTStmtWriter::VisitMemberExpr(MemberExpr *E) { Writer.AddDeclRef(E->getMemberDecl(), Record); Writer.AddSourceLocation(E->getMemberLoc(), Record); Record.push_back(E->isArrow()); + Writer.AddSourceLocation(E->getOperatorLoc(), Record); Writer.AddDeclarationNameLoc(E->MemberDNLoc, E->getMemberDecl()->getDeclName(), Record); Code = serialization::EXPR_MEMBER; @@ -1744,6 +1745,7 @@ void OMPClauseWriter::VisitOMPProcBindClause(OMPProcBindClause *C) { void OMPClauseWriter::VisitOMPScheduleClause(OMPScheduleClause *C) { Record.push_back(C->getScheduleKind()); Writer->Writer.AddStmt(C->getChunkSize()); + Writer->Writer.AddStmt(C->getHelperChunkSize()); Writer->Writer.AddSourceLocation(C->getLParenLoc(), Record); Writer->Writer.AddSourceLocation(C->getScheduleKindLoc(), Record); Writer->Writer.AddSourceLocation(C->getCommaLoc(), Record); @@ -1797,6 +1799,14 @@ void OMPClauseWriter::VisitOMPLastprivateClause(OMPLastprivateClause *C) { Writer->Writer.AddSourceLocation(C->getLParenLoc(), Record); for (auto *VE : C->varlists()) Writer->Writer.AddStmt(VE); + for (auto *E : C->private_copies()) + Writer->Writer.AddStmt(E); + for (auto *E : C->source_exprs()) + Writer->Writer.AddStmt(E); + for (auto *E : C->destination_exprs()) + Writer->Writer.AddStmt(E); + for (auto *E : C->assignment_ops()) + Writer->Writer.AddStmt(E); } void OMPClauseWriter::VisitOMPSharedClause(OMPSharedClause *C) { @@ -1814,15 +1824,32 @@ void OMPClauseWriter::VisitOMPReductionClause(OMPReductionClause *C) { Writer->Writer.AddDeclarationNameInfo(C->getNameInfo(), Record); for (auto *VE : C->varlists()) Writer->Writer.AddStmt(VE); + for (auto *E : C->lhs_exprs()) + Writer->Writer.AddStmt(E); + for (auto *E : C->rhs_exprs()) + Writer->Writer.AddStmt(E); + for (auto *E : C->reduction_ops()) + Writer->Writer.AddStmt(E); } void OMPClauseWriter::VisitOMPLinearClause(OMPLinearClause *C) { Record.push_back(C->varlist_size()); Writer->Writer.AddSourceLocation(C->getLParenLoc(), Record); Writer->Writer.AddSourceLocation(C->getColonLoc(), Record); - for (auto *VE : C->varlists()) + for (auto *VE : C->varlists()) { + Writer->Writer.AddStmt(VE); + } 
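OMPLinearClause now carries four parallel statement lists of varlist_size() elements each, followed by two trailing expressions, and deserialization must drain them in exactly this order. A schematic of that ordering contract, with a hypothetical emitStmt sink standing in for the writer's AddStmt:

#include <cstdio>
#include <vector>

struct Stmt { int Id; };                    // opaque placeholder
using StmtList = std::vector<const Stmt *>;

// Hypothetical sink standing in for the writer's AddStmt.
void emitStmt(const Stmt *S) { std::printf("stmt %d\n", S ? S->Id : -1); }

// Emission order: vars, inits, updates, finals, then Step and CalcStep.
// A reader consuming them in any other order shears the parallel lists.
void writeLinear(const StmtList &Vars, const StmtList &Inits,
                 const StmtList &Updates, const StmtList &Finals,
                 const Stmt *Step, const Stmt *CalcStep) {
  for (auto *S : Vars)    emitStmt(S);
  for (auto *S : Inits)   emitStmt(S);
  for (auto *S : Updates) emitStmt(S);
  for (auto *S : Finals)  emitStmt(S);
  emitStmt(Step);
  emitStmt(CalcStep);
}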
+ for (auto *VE : C->inits()) { Writer->Writer.AddStmt(VE); + } + for (auto *VE : C->updates()) { + Writer->Writer.AddStmt(VE); + } + for (auto *VE : C->finals()) { + Writer->Writer.AddStmt(VE); + } Writer->Writer.AddStmt(C->getStep()); + Writer->Writer.AddStmt(C->getCalcStep()); } void OMPClauseWriter::VisitOMPAlignedClause(OMPAlignedClause *C) { @@ -1839,6 +1866,12 @@ void OMPClauseWriter::VisitOMPCopyinClause(OMPCopyinClause *C) { Writer->Writer.AddSourceLocation(C->getLParenLoc(), Record); for (auto *VE : C->varlists()) Writer->Writer.AddStmt(VE); + for (auto *E : C->source_exprs()) + Writer->Writer.AddStmt(E); + for (auto *E : C->destination_exprs()) + Writer->Writer.AddStmt(E); + for (auto *E : C->assignment_ops()) + Writer->Writer.AddStmt(E); } void OMPClauseWriter::VisitOMPCopyprivateClause(OMPCopyprivateClause *C) { @@ -1846,6 +1879,12 @@ void OMPClauseWriter::VisitOMPCopyprivateClause(OMPCopyprivateClause *C) { Writer->Writer.AddSourceLocation(C->getLParenLoc(), Record); for (auto *VE : C->varlists()) Writer->Writer.AddStmt(VE); + for (auto *E : C->source_exprs()) + Writer->Writer.AddStmt(E); + for (auto *E : C->destination_exprs()) + Writer->Writer.AddStmt(E); + for (auto *E : C->assignment_ops()) + Writer->Writer.AddStmt(E); } void OMPClauseWriter::VisitOMPFlushClause(OMPFlushClause *C) { @@ -1990,6 +2029,9 @@ void ASTStmtWriter::VisitOMPAtomicDirective(OMPAtomicDirective *D) { Writer.AddStmt(D->getX()); Writer.AddStmt(D->getV()); Writer.AddStmt(D->getExpr()); + Writer.AddStmt(D->getUpdateExpr()); + Record.push_back(D->isXLHSInRHSPart() ? 1 : 0); + Record.push_back(D->isPostfixUpdate() ? 1 : 0); Code = serialization::STMT_OMP_ATOMIC_DIRECTIVE; } diff --git a/contrib/llvm/tools/clang/lib/Serialization/GlobalModuleIndex.cpp b/contrib/llvm/tools/clang/lib/Serialization/GlobalModuleIndex.cpp index 4791388..1b52b44 100644 --- a/contrib/llvm/tools/clang/lib/Serialization/GlobalModuleIndex.cpp +++ b/contrib/llvm/tools/clang/lib/Serialization/GlobalModuleIndex.cpp @@ -757,7 +757,7 @@ void GlobalModuleIndexBuilder::writeIndex(llvm::BitstreamWriter &Stream) { Record.clear(); Record.push_back(IDENTIFIER_INDEX); Record.push_back(BucketOffset); - Stream.EmitRecordWithBlob(IDTableAbbrev, Record, IdentifierTable.str()); + Stream.EmitRecordWithBlob(IDTableAbbrev, Record, IdentifierTable); } Stream.ExitBlock(); @@ -841,12 +841,12 @@ GlobalModuleIndex::writeIndex(FileManager &FileMgr, StringRef Path) { return EC_IOError; // Remove the old index file. It isn't relevant any more. - llvm::sys::fs::remove(IndexPath.str()); + llvm::sys::fs::remove(IndexPath); // Rename the newly-written index file to the proper name. 
- if (llvm::sys::fs::rename(IndexTmpPath.str(), IndexPath.str())) { + if (llvm::sys::fs::rename(IndexTmpPath, IndexPath)) { // Rename failed; just remove the temporary file. - llvm::sys::fs::remove(IndexTmpPath.str()); + llvm::sys::fs::remove(IndexTmpPath); return EC_IOError; } diff --git a/contrib/llvm/tools/clang/lib/Serialization/Module.cpp b/contrib/llvm/tools/clang/lib/Serialization/Module.cpp index 6c48a41..3b237d5 100644 --- a/contrib/llvm/tools/clang/lib/Serialization/Module.cpp +++ b/contrib/llvm/tools/clang/lib/Serialization/Module.cpp @@ -38,6 +38,7 @@ ModuleFile::ModuleFile(ModuleKind Kind, unsigned Generation) SelectorLookupTableData(nullptr), SelectorLookupTable(nullptr), LocalNumDecls(0), DeclOffsets(nullptr), BaseDeclID(0), LocalNumCXXBaseSpecifiers(0), CXXBaseSpecifiersOffsets(nullptr), + LocalNumCXXCtorInitializers(0), CXXCtorInitializersOffsets(nullptr), FileSortedDecls(nullptr), NumFileSortedDecls(0), RedeclarationsMap(nullptr), LocalNumRedeclarationsInMap(0), ObjCCategoriesMap(nullptr), LocalNumObjCCategoriesInMap(0), diff --git a/contrib/llvm/tools/clang/lib/Serialization/ModuleManager.cpp b/contrib/llvm/tools/clang/lib/Serialization/ModuleManager.cpp index ac98ca0..30d9c89 100644 --- a/contrib/llvm/tools/clang/lib/Serialization/ModuleManager.cpp +++ b/contrib/llvm/tools/clang/lib/Serialization/ModuleManager.cpp @@ -58,8 +58,7 @@ ModuleManager::addModule(StringRef FileName, ModuleKind Type, unsigned Generation, off_t ExpectedSize, time_t ExpectedModTime, ASTFileSignature ExpectedSignature, - std::function<ASTFileSignature(llvm::BitstreamReader &)> - ReadSignature, + ASTFileSignatureReader ReadSignature, ModuleFile *&Module, std::string &ErrorStr) { Module = nullptr; @@ -95,6 +94,8 @@ ModuleManager::addModule(StringRef FileName, ModuleKind Type, New->File = Entry; New->ImportLoc = ImportLoc; Chain.push_back(New); + if (!ImportedBy) + Roots.push_back(New); NewModule = true; ModuleEntry = New; @@ -156,7 +157,12 @@ ModuleManager::addModule(StringRef FileName, ModuleKind Type, // invalidate the file cache for Entry, and that is not safe if this // module is *itself* up to date, but has an out-of-date importer. Modules.erase(Entry); + assert(Chain.back() == ModuleEntry); Chain.pop_back(); + if (Roots.back() == ModuleEntry) + Roots.pop_back(); + else + assert(ImportedBy); delete ModuleEntry; } return OutOfDate; @@ -187,12 +193,15 @@ void ModuleManager::removeModules( // Collect the set of module file pointers that we'll be removing. llvm::SmallPtrSet<ModuleFile *, 4> victimSet(first, last); + auto IsVictim = [&](ModuleFile *MF) { + return victimSet.count(MF); + }; // Remove any references to the now-destroyed modules. for (unsigned i = 0, n = Chain.size(); i != n; ++i) { - Chain[i]->ImportedBy.remove_if([&](ModuleFile *MF) { - return victimSet.count(MF); - }); + Chain[i]->ImportedBy.remove_if(IsVictim); } + Roots.erase(std::remove_if(Roots.begin(), Roots.end(), IsVictim), + Roots.end()); // Delete the modules and erase them from the various structures. for (ModuleIterator victim = first; victim != last; ++victim) { @@ -227,6 +236,15 @@ ModuleManager::addInMemoryBuffer(StringRef FileName, InMemoryBuffers[Entry] = std::move(Buffer); } +bool ModuleManager::addKnownModuleFile(StringRef FileName) { + const FileEntry *File; + if (lookupModuleFile(FileName, 0, 0, File)) + return true; + if (!Modules.count(File)) + AdditionalKnownModuleFiles.insert(File); + return false; +} + ModuleManager::VisitState *ModuleManager::allocateVisitState() { // Fast path: if we have a cached state, use it.
if (FirstVisitState) { @@ -263,6 +281,8 @@ void ModuleManager::setGlobalIndex(GlobalModuleIndex *Index) { } void ModuleManager::moduleFileAccepted(ModuleFile *MF) { + AdditionalKnownModuleFiles.remove(MF->File); + if (!GlobalIndex || GlobalIndex->loadedModuleFile(MF)) return; @@ -388,16 +408,38 @@ ModuleManager::visit(bool (*Visitor)(ModuleFile &M, void *UserData), returnVisitState(State); } +static void markVisitedDepthFirst(ModuleFile &M, + SmallVectorImpl<bool> &Visited) { + for (llvm::SetVector<ModuleFile *>::iterator IM = M.Imports.begin(), + IMEnd = M.Imports.end(); + IM != IMEnd; ++IM) { + if (Visited[(*IM)->Index]) + continue; + Visited[(*IM)->Index] = true; + if (!M.DirectlyImported) + markVisitedDepthFirst(**IM, Visited); + } +} + /// \brief Perform a depth-first visit of the current module. -static bool visitDepthFirst(ModuleFile &M, - bool (*Visitor)(ModuleFile &M, bool Preorder, - void *UserData), - void *UserData, - SmallVectorImpl<bool> &Visited) { - // Preorder visitation - if (Visitor(M, /*Preorder=*/true, UserData)) - return true; - +static bool visitDepthFirst( + ModuleFile &M, + ModuleManager::DFSPreorderControl (*PreorderVisitor)(ModuleFile &M, + void *UserData), + bool (*PostorderVisitor)(ModuleFile &M, void *UserData), void *UserData, + SmallVectorImpl<bool> &Visited) { + if (PreorderVisitor) { + switch (PreorderVisitor(M, UserData)) { + case ModuleManager::Abort: + return true; + case ModuleManager::SkipImports: + markVisitedDepthFirst(M, Visited); + return false; + case ModuleManager::Continue: + break; + } + } + // Visit children for (llvm::SetVector<ModuleFile *>::iterator IM = M.Imports.begin(), IMEnd = M.Imports.end(); @@ -406,24 +448,27 @@ static bool visitDepthFirst(ModuleFile &M, continue; Visited[(*IM)->Index] = true; - if (visitDepthFirst(**IM, Visitor, UserData, Visited)) + if (visitDepthFirst(**IM, PreorderVisitor, PostorderVisitor, UserData, Visited)) return true; } - // Postorder visitation - return Visitor(M, /*Preorder=*/false, UserData); + if (PostorderVisitor) + return PostorderVisitor(M, UserData); + + return false; } -void ModuleManager::visitDepthFirst(bool (*Visitor)(ModuleFile &M, bool Preorder, - void *UserData), - void *UserData) { +void ModuleManager::visitDepthFirst( + ModuleManager::DFSPreorderControl (*PreorderVisitor)(ModuleFile &M, + void *UserData), + bool (*PostorderVisitor)(ModuleFile &M, void *UserData), void *UserData) { SmallVector<bool, 16> Visited(size(), false); - for (unsigned I = 0, N = Chain.size(); I != N; ++I) { - if (Visited[Chain[I]->Index]) + for (unsigned I = 0, N = Roots.size(); I != N; ++I) { + if (Visited[Roots[I]->Index]) continue; - Visited[Chain[I]->Index] = true; + Visited[Roots[I]->Index] = true; - if (::visitDepthFirst(*Chain[I], Visitor, UserData, Visited)) + if (::visitDepthFirst(*Roots[I], PreorderVisitor, PostorderVisitor, UserData, Visited)) return; } } diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp index e91a7e1..0f5741b 100644 --- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp +++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp @@ -1922,10 +1922,6 @@ bool CStringChecker::evalCall(const CallExpr *CE, CheckerContext &C) const { if (!evalFunction) return false; - // Make sure each function sets its own description. - // (But don't bother in a release build.) - assert(!(CurrentFunctionDescription = nullptr)); - // Check and evaluate the call. 
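For readers unfamiliar with the dispatch used here: evalFunction is a pointer-to-member-function selected from the call's name, and invoking it requires the ->* syntax seen on the next line. A self-contained sketch of the same pattern; the Checker type and handler names are invented for illustration:

#include <cstdio>
#include <map>
#include <string>

struct Checker {
  void evalMemcpy() { std::puts("modeling memcpy"); }
  void evalStrlen() { std::puts("modeling strlen"); }

  using Handler = void (Checker::*)();

  bool evalCall(const std::string &Name) {
    // Name -> handler table; unknown names are not ours to model.
    static const std::map<std::string, Handler> Table = {
        {"memcpy", &Checker::evalMemcpy}, {"strlen", &Checker::evalStrlen}};
    auto It = Table.find(Name);
    if (It == Table.end())
      return false;
    (this->*It->second)(); // same call shape as (this->*evalFunction)(C, CE)
    return true;
  }
};

int main() { Checker C; C.evalCall("memcpy"); }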
(this->*evalFunction)(C, CE); diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp index 45768b2..0beb917 100644 --- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp +++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp @@ -28,6 +28,7 @@ using namespace ento; static bool isArc4RandomAvailable(const ASTContext &Ctx) { const llvm::Triple &T = Ctx.getTargetInfo().getTriple(); return T.getVendor() == llvm::Triple::Apple || + T.getOS() == llvm::Triple::CloudABI || T.getOS() == llvm::Triple::FreeBSD || T.getOS() == llvm::Triple::NetBSD || T.getOS() == llvm::Triple::OpenBSD || diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/Checkers.td b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/Checkers.td index ba5b4fa..d1d6ac2 100644 --- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/Checkers.td +++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/Checkers.td @@ -295,7 +295,7 @@ def UnixAPIChecker : Checker<"API">, HelpText<"Check calls to various UNIX/Posix functions">, DescFile<"UnixAPIChecker.cpp">; -def MallocPessimistic : Checker<"Malloc">, +def MallocChecker: Checker<"Malloc">, HelpText<"Check for memory leaks, double free, and use-after-free problems. Traces memory managed by malloc()/free().">, DescFile<"MallocChecker.cpp">; @@ -315,10 +315,6 @@ def ChrootChecker : Checker<"Chroot">, HelpText<"Check improper use of chroot">, DescFile<"ChrootChecker.cpp">; -def MallocOptimistic : Checker<"MallocWithAnnotations">, - HelpText<"Check for memory leaks, double free, and use-after-free problems. Traces memory managed by malloc()/free(). Assumes that all user-defined functions which might free a pointer are annotated.">, - DescFile<"MallocChecker.cpp">; - def PthreadLockChecker : Checker<"PthreadLock">, HelpText<"Simple lock -> unlock checker">, DescFile<"PthreadLockChecker.cpp">; diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp index c1ea767..f4be5b3 100644 --- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp +++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp @@ -142,7 +142,7 @@ public: : cfg(cfg), Ctx(ctx), BR(br), Checker(checker), AC(ac), Parents(parents), Escaped(escaped), currentBlock(nullptr) {} - virtual ~DeadStoreObs() {} + ~DeadStoreObs() override {} bool isLive(const LiveVariables::LivenessValues &Live, const VarDecl *D) { if (Live.isLive(D)) diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp index 4ee0223..2e442c7 100644 --- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp +++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp @@ -162,7 +162,7 @@ void DereferenceChecker::reportBug(ProgramStateRef State, const Stmt *S, os.flush(); BugReport *report = new BugReport(*BT_null, - buf.empty() ? BT_null->getDescription() : buf.str(), + buf.empty() ? 
BT_null->getDescription() : StringRef(buf), N); bugreporter::trackNullOrUndefValue(N, bugreporter::getDerefExpr(S), *report); diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/InterCheckerAPI.h b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/InterCheckerAPI.h index b7549fd..d38d63c 100644 --- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/InterCheckerAPI.h +++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/InterCheckerAPI.h @@ -13,6 +13,8 @@ #ifndef LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_INTERCHECKERAPI_H #define LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_INTERCHECKERAPI_H namespace clang { +class CheckerManager; + namespace ento { /// Register the checker which evaluates CString API calls. diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp index 1926600..02c1209 100644 --- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp +++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp @@ -336,7 +336,7 @@ const ObjCIvarDecl *IvarInvalidationCheckerImpl::findPropertyBackingIvar( llvm::raw_svector_ostream os(PropNameWithUnderscore); os << '_' << PropName; } - if (IvarName == PropNameWithUnderscore.str()) + if (IvarName == PropNameWithUnderscore) return Iv; } diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp index 13ea4d3..52e2936 100644 --- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp +++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp @@ -137,7 +137,7 @@ private: public: SecKeychainBugVisitor(SymbolRef S) : Sym(S) {} - virtual ~SecKeychainBugVisitor() {} + ~SecKeychainBugVisitor() override {} void Profile(llvm::FoldingSetNodeID &ID) const override { static int X = 0; @@ -292,7 +292,11 @@ void MacOSKeychainAPIChecker::checkPreStmt(const CallExpr *CE, // If it is a call to an allocator function, it could be a double allocation. idx = getTrackedFunctionIndex(funName, true); if (idx != InvalidIdx) { - const Expr *ArgExpr = CE->getArg(FunctionsToTrack[idx].Param); + unsigned paramIdx = FunctionsToTrack[idx].Param; + if (CE->getNumArgs() <= paramIdx) + return; + + const Expr *ArgExpr = CE->getArg(paramIdx); if (SymbolRef V = getAsPointeeSymbol(ArgExpr, C)) if (const AllocationState *AS = State->get<AllocatedData>(V)) { if (!definitelyReturnedError(AS->Region, State, C.getSValBuilder())) { @@ -325,8 +329,12 @@ void MacOSKeychainAPIChecker::checkPreStmt(const CallExpr *CE, if (idx == InvalidIdx) return; + unsigned paramIdx = FunctionsToTrack[idx].Param; + if (CE->getNumArgs() <= paramIdx) + return; + // Check the argument to the deallocator. - const Expr *ArgExpr = CE->getArg(FunctionsToTrack[idx].Param); + const Expr *ArgExpr = CE->getArg(paramIdx); SVal ArgSVal = State->getSVal(ArgExpr, C.getLocationContext()); // Undef is reported by another checker. @@ -499,9 +507,11 @@ MacOSKeychainAPIChecker::getAllocationNode(const ExplodedNode *N, while (N) { if (!N->getState()->get<AllocatedData>(Sym)) break; - // Allocation node, is the last node in the current context in which the - // symbol was tracked. - if (N->getLocationContext() == LeakContext) + // The allocation node is the last node in the current or parent context in + // which the symbol was tracked.
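The widened condition accepts a node whose location context either is the leak context or is one of its parents, so an allocation performed in a caller frame is still treated as the allocation node. A minimal sketch of that check over a simplified context chain; Ctx is a stand-in for LocationContext:

// Simplified stand-in for LocationContext with a parent chain.
struct Ctx {
  const Ctx *Parent = nullptr;
  bool isParentOf(const Ctx *Child) const {
    for (const Ctx *C = Child ? Child->Parent : nullptr; C; C = C->Parent)
      if (C == this)
        return true;
    return false;
  }
};

// Mirrors `NContext == LeakContext || NContext->isParentOf(LeakContext)`.
bool contextCovers(const Ctx *NContext, const Ctx *LeakContext) {
  return NContext == LeakContext || NContext->isParentOf(LeakContext);
}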
+ const LocationContext *NContext = N->getLocationContext(); + if (NContext == LeakContext || + NContext->isParentOf(LeakContext)) AllocNode = N; N = N->pred_empty() ? nullptr : *(N->pred_begin()); } diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp index aee5a43..0cf0094 100644 --- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp +++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp @@ -43,12 +43,15 @@ enum AllocationFamily { AF_Malloc, AF_CXXNew, AF_CXXNewArray, - AF_IfNameIndex + AF_IfNameIndex, + AF_Alloca }; class RefState { enum Kind { // Reference to allocated memory. Allocated, + // Reference to zero-allocated memory. + AllocatedOfSizeZero, // Reference to released/freed memory. Released, // The responsibility for freeing resources has transferred from @@ -61,8 +64,8 @@ class RefState { }; const Stmt *S; - unsigned K : 2; // Kind enum, but stored as a bitfield. - unsigned Family : 30; // Rest of 32-bit word, currently just an allocation + unsigned K : 3; // Kind enum, but stored as a bitfield. + unsigned Family : 29; // Rest of 32-bit word, currently just an allocation // family. RefState(Kind k, const Stmt *s, unsigned family) @@ -71,6 +74,7 @@ class RefState { } public: bool isAllocated() const { return K == Allocated; } + bool isAllocatedOfSizeZero() const { return K == AllocatedOfSizeZero; } bool isReleased() const { return K == Released; } bool isRelinquished() const { return K == Relinquished; } bool isEscaped() const { return K == Escaped; } @@ -86,6 +90,10 @@ public: static RefState getAllocated(unsigned family, const Stmt *s) { return RefState(Allocated, s, family); } + static RefState getAllocatedOfSizeZero(const RefState *RS) { + return RefState(AllocatedOfSizeZero, RS->getStmt(), + RS->getAllocationFamily()); + } static RefState getReleased(unsigned family, const Stmt *s) { return RefState(Released, s, family); } @@ -106,6 +114,7 @@ public: switch (static_cast<Kind>(K)) { #define CASE(ID) case ID: OS << #ID; break; CASE(Allocated) + CASE(AllocatedOfSizeZero) CASE(Released) CASE(Relinquished) CASE(Escaped) @@ -160,16 +169,16 @@ class MallocChecker : public Checker<check::DeadSymbols, { public: MallocChecker() - : II_malloc(nullptr), II_free(nullptr), II_realloc(nullptr), - II_calloc(nullptr), II_valloc(nullptr), II_reallocf(nullptr), - II_strndup(nullptr), II_strdup(nullptr), II_kmalloc(nullptr), - II_if_nameindex(nullptr), II_if_freenameindex(nullptr) {} + : II_alloca(nullptr), II_malloc(nullptr), II_free(nullptr), + II_realloc(nullptr), II_calloc(nullptr), II_valloc(nullptr), + II_reallocf(nullptr), II_strndup(nullptr), II_strdup(nullptr), + II_kmalloc(nullptr), II_if_nameindex(nullptr), + II_if_freenameindex(nullptr) {} /// In pessimistic mode, the checker assumes that it does not know which /// functions might free the memory. 
enum CheckKind { - CK_MallocPessimistic, - CK_MallocOptimistic, + CK_MallocChecker, CK_NewDeleteChecker, CK_NewDeleteLeaksChecker, CK_MismatchedDeallocatorChecker, @@ -182,6 +191,8 @@ public: MOK_Any }; + DefaultBool IsOptimistic; + DefaultBool ChecksEnabled[CK_NumCheckKinds]; CheckName CheckNames[CK_NumCheckKinds]; @@ -216,11 +227,14 @@ private: mutable std::unique_ptr<BugType> BT_Leak[CK_NumCheckKinds]; mutable std::unique_ptr<BugType> BT_UseFree[CK_NumCheckKinds]; mutable std::unique_ptr<BugType> BT_BadFree[CK_NumCheckKinds]; + mutable std::unique_ptr<BugType> BT_FreeAlloca[CK_NumCheckKinds]; mutable std::unique_ptr<BugType> BT_MismatchedDealloc; mutable std::unique_ptr<BugType> BT_OffsetFree[CK_NumCheckKinds]; - mutable IdentifierInfo *II_malloc, *II_free, *II_realloc, *II_calloc, - *II_valloc, *II_reallocf, *II_strndup, *II_strdup, - *II_kmalloc, *II_if_nameindex, *II_if_freenameindex; + mutable std::unique_ptr<BugType> BT_UseZerroAllocated[CK_NumCheckKinds]; + mutable IdentifierInfo *II_alloca, *II_malloc, *II_free, *II_realloc, + *II_calloc, *II_valloc, *II_reallocf, *II_strndup, + *II_strdup, *II_kmalloc, *II_if_nameindex, + *II_if_freenameindex; mutable Optional<uint64_t> KernelZeroFlagVal; void initIdentifierInfo(ASTContext &C) const; @@ -252,22 +266,24 @@ private: MemoryOperationKind MemKind) const; bool isStandardNewDelete(const FunctionDecl *FD, ASTContext &C) const; ///@} + + /// \brief Perform a zero-allocation check. + ProgramStateRef ProcessZeroAllocation(CheckerContext &C, const Expr *E, + const unsigned AllocationSizeArg, + ProgramStateRef State) const; + ProgramStateRef MallocMemReturnsAttr(CheckerContext &C, const CallExpr *CE, - const OwnershipAttr* Att) const; + const OwnershipAttr* Att, + ProgramStateRef State) const; static ProgramStateRef MallocMemAux(CheckerContext &C, const CallExpr *CE, - const Expr *SizeEx, SVal Init, - ProgramStateRef State, - AllocationFamily Family = AF_Malloc) { - return MallocMemAux(C, CE, - State->getSVal(SizeEx, C.getLocationContext()), - Init, State, Family); - } - + const Expr *SizeEx, SVal Init, + ProgramStateRef State, + AllocationFamily Family = AF_Malloc); static ProgramStateRef MallocMemAux(CheckerContext &C, const CallExpr *CE, - SVal SizeEx, SVal Init, - ProgramStateRef State, - AllocationFamily Family = AF_Malloc); + SVal SizeEx, SVal Init, + ProgramStateRef State, + AllocationFamily Family = AF_Malloc); // Check if this malloc() is called with special flags. At present that means M_ZERO or // __GFP_ZERO (in which case, treat it like calloc). @@ -281,7 +297,8 @@ private: AllocationFamily Family = AF_Malloc); ProgramStateRef FreeMemAttr(CheckerContext &C, const CallExpr *CE, - const OwnershipAttr* Att) const; + const OwnershipAttr* Att, + ProgramStateRef State) const; ProgramStateRef FreeMemAux(CheckerContext &C, const CallExpr *CE, ProgramStateRef state, unsigned Num, bool Hold, @@ -295,14 +312,19 @@ private: bool ReturnsNullOnFailure = false) const; ProgramStateRef ReallocMem(CheckerContext &C, const CallExpr *CE, - bool FreesMemOnFailure) const; - static ProgramStateRef CallocMem(CheckerContext &C, const CallExpr *CE); + bool FreesMemOnFailure, + ProgramStateRef State) const; + static ProgramStateRef CallocMem(CheckerContext &C, const CallExpr *CE, + ProgramStateRef State); ///\brief Check if the memory associated with this symbol was released.
bool isReleased(SymbolRef Sym, CheckerContext &C) const; bool checkUseAfterFree(SymbolRef Sym, CheckerContext &C, const Stmt *S) const; + void checkUseZeroAllocated(SymbolRef Sym, CheckerContext &C, + const Stmt *S) const; + bool checkDoubleDelete(SymbolRef Sym, CheckerContext &C) const; /// Check if the function is known free memory, or if it is @@ -330,15 +352,20 @@ private: /// Tells if a given family/call/symbol is tracked by the current checker. /// Sets CheckKind to the kind of the checker responsible for this /// family/call/symbol. - Optional<CheckKind> getCheckIfTracked(AllocationFamily Family) const; + Optional<CheckKind> getCheckIfTracked(AllocationFamily Family, + bool IsALeakCheck = false) const; Optional<CheckKind> getCheckIfTracked(CheckerContext &C, - const Stmt *AllocDeallocStmt) const; - Optional<CheckKind> getCheckIfTracked(CheckerContext &C, SymbolRef Sym) const; + const Stmt *AllocDeallocStmt, + bool IsALeakCheck = false) const; + Optional<CheckKind> getCheckIfTracked(CheckerContext &C, SymbolRef Sym, + bool IsALeakCheck = false) const; ///@} static bool SummarizeValue(raw_ostream &os, SVal V); static bool SummarizeRegion(raw_ostream &os, const MemRegion *MR); void ReportBadFree(CheckerContext &C, SVal ArgVal, SourceRange Range, const Expr *DeallocExpr) const; + void ReportFreeAlloca(CheckerContext &C, SVal ArgVal, + SourceRange Range) const; void ReportMismatchedDealloc(CheckerContext &C, SourceRange Range, const Expr *DeallocExpr, const RefState *RS, SymbolRef Sym, bool OwnershipTransferred) const; @@ -352,6 +379,9 @@ private: void ReportDoubleDelete(CheckerContext &C, SymbolRef Sym) const; + void ReportUseZeroAllocated(CheckerContext &C, SourceRange Range, + SymbolRef Sym) const; + /// Find the location of the allocation for Sym on the path leading to the /// exploded node N. LeakInfo getAllocationSite(const ExplodedNode *N, SymbolRef Sym, @@ -384,7 +414,7 @@ private: MallocBugVisitor(SymbolRef S, bool isLeak = false) : Sym(S), Mode(Normal), FailedReallocSymbol(nullptr), IsLeak(isLeak) {} - virtual ~MallocBugVisitor() {} + ~MallocBugVisitor() override {} void Profile(llvm::FoldingSetNodeID &ID) const override { static int X = 0; @@ -396,7 +426,9 @@ private: const Stmt *Stmt) { // Did not track -> allocated. Other state (released) -> allocated. return (Stmt && (isa<CallExpr>(Stmt) || isa<CXXNewExpr>(Stmt)) && - (S && S->isAllocated()) && (!SPrev || !SPrev->isAllocated())); + (S && (S->isAllocated() || S->isAllocatedOfSizeZero())) && + (!SPrev || !(SPrev->isAllocated() || + SPrev->isAllocatedOfSizeZero()))); } inline bool isReleased(const RefState *S, const RefState *SPrev, @@ -422,7 +454,9 @@ private: // check. If we have to handle more cases here, it might be cleaner just // to track this extra bit in the state itself. 
return ((!Stmt || !isa<CallExpr>(Stmt)) && - (S && S->isAllocated()) && (SPrev && !SPrev->isAllocated())); + (S && (S->isAllocated() || S->isAllocatedOfSizeZero())) && + (SPrev && !(SPrev->isAllocated() || + SPrev->isAllocatedOfSizeZero()))); } PathDiagnosticPiece *VisitNode(const ExplodedNode *N, @@ -497,6 +531,7 @@ public: void MallocChecker::initIdentifierInfo(ASTContext &Ctx) const { if (II_malloc) return; + II_alloca = &Ctx.Idents.get("alloca"); II_malloc = &Ctx.Idents.get("malloc"); II_free = &Ctx.Idents.get("free"); II_realloc = &Ctx.Idents.get("realloc"); @@ -517,6 +552,9 @@ bool MallocChecker::isMemFunction(const FunctionDecl *FD, ASTContext &C) const { if (isCMemFunction(FD, C, AF_IfNameIndex, MemoryOperationKind::MOK_Any)) return true; + if (isCMemFunction(FD, C, AF_Alloca, MemoryOperationKind::MOK_Any)) + return true; + if (isStandardNewDelete(FD, C)) return true; @@ -560,12 +598,17 @@ bool MallocChecker::isCMemFunction(const FunctionDecl *FD, if (FunI == II_if_nameindex) return true; } + + if (Family == AF_Alloca && CheckAlloc) { + if (FunI == II_alloca) + return true; + } } if (Family != AF_Malloc) return false; - if (ChecksEnabled[CK_MallocOptimistic] && FD->hasAttrs()) { + if (IsOptimistic && FD->hasAttrs()) { for (const auto *I : FD->specific_attrs<OwnershipAttr>()) { OwnershipAttr::OwnershipKind OwnKind = I->getOwnKind(); if(OwnKind == OwnershipAttr::Takes || OwnKind == OwnershipAttr::Holds) { @@ -712,6 +755,8 @@ void MallocChecker::checkPostStmt(const CallExpr *CE, CheckerContext &C) const { return; if (CE->getNumArgs() < 3) { State = MallocMemAux(C, CE, CE->getArg(0), UndefinedVal(), State); + if (CE->getNumArgs() == 1) + State = ProcessZeroAllocation(C, CE, 0, State); } else if (CE->getNumArgs() == 3) { llvm::Optional<ProgramStateRef> MaybeState = performKernelMalloc(CE, C, State); @@ -731,31 +776,43 @@ void MallocChecker::checkPostStmt(const CallExpr *CE, CheckerContext &C) const { if (CE->getNumArgs() < 1) return; State = MallocMemAux(C, CE, CE->getArg(0), UndefinedVal(), State); + State = ProcessZeroAllocation(C, CE, 0, State); } else if (FunI == II_realloc) { - State = ReallocMem(C, CE, false); + State = ReallocMem(C, CE, false, State); + State = ProcessZeroAllocation(C, CE, 1, State); } else if (FunI == II_reallocf) { - State = ReallocMem(C, CE, true); + State = ReallocMem(C, CE, true, State); + State = ProcessZeroAllocation(C, CE, 1, State); } else if (FunI == II_calloc) { - State = CallocMem(C, CE); + State = CallocMem(C, CE, State); + State = ProcessZeroAllocation(C, CE, 0, State); + State = ProcessZeroAllocation(C, CE, 1, State); } else if (FunI == II_free) { State = FreeMemAux(C, CE, State, 0, false, ReleasedAllocatedMemory); } else if (FunI == II_strdup) { State = MallocUpdateRefState(C, CE, State); } else if (FunI == II_strndup) { State = MallocUpdateRefState(C, CE, State); - } - else if (isStandardNewDelete(FD, C.getASTContext())) { + } else if (FunI == II_alloca) { + State = MallocMemAux(C, CE, CE->getArg(0), UndefinedVal(), State, + AF_Alloca); + State = ProcessZeroAllocation(C, CE, 0, State); + } else if (isStandardNewDelete(FD, C.getASTContext())) { // Process direct calls to operator new/new[]/delete/delete[] functions // as distinct from new/new[]/delete/delete[] expressions that are // processed by the checkPostStmt callbacks for CXXNewExpr and // CXXDeleteExpr. 
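The distinction drawn in that comment is between two source forms that reach different callbacks: a new-expression is modeled via the CXXNewExpr callback, while an explicit call to the allocation function is an ordinary CallExpr and lands in the operator switch below. Both forms in standard C++, for illustration:

#include <new>

void demo() {
  int *P = new int(1);   // CXXNewExpr: modeled by the CXXNewExpr callback
  delete P;              // CXXDeleteExpr: modeled by its own callback

  void *Raw = ::operator new(sizeof(int)); // plain CallExpr: OO_New case
  ::operator delete(Raw);                  // plain CallExpr: OO_Delete case
}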
OverloadedOperatorKind K = FD->getOverloadedOperator(); - if (K == OO_New) + if (K == OO_New) { State = MallocMemAux(C, CE, CE->getArg(0), UndefinedVal(), State, AF_CXXNew); - else if (K == OO_Array_New) + State = ProcessZeroAllocation(C, CE, 0, State); + } + else if (K == OO_Array_New) { State = MallocMemAux(C, CE, CE->getArg(0), UndefinedVal(), State, AF_CXXNewArray); + State = ProcessZeroAllocation(C, CE, 0, State); + } else if (K == OO_Delete || K == OO_Array_Delete) State = FreeMemAux(C, CE, State, 0, false, ReleasedAllocatedMemory); else @@ -770,19 +827,18 @@ void MallocChecker::checkPostStmt(const CallExpr *CE, CheckerContext &C) const { } } - if (ChecksEnabled[CK_MallocOptimistic] || - ChecksEnabled[CK_MismatchedDeallocatorChecker]) { + if (IsOptimistic || ChecksEnabled[CK_MismatchedDeallocatorChecker]) { // Check all the attributes, if there are any. // There can be multiple of these attributes. if (FD->hasAttrs()) for (const auto *I : FD->specific_attrs<OwnershipAttr>()) { switch (I->getOwnKind()) { case OwnershipAttr::Returns: - State = MallocMemReturnsAttr(C, CE, I); + State = MallocMemReturnsAttr(C, CE, I, State); break; case OwnershipAttr::Takes: case OwnershipAttr::Holds: - State = FreeMemAttr(C, CE, I); + State = FreeMemAttr(C, CE, I, State); break; } } @@ -790,6 +846,68 @@ void MallocChecker::checkPostStmt(const CallExpr *CE, CheckerContext &C) const { C.addTransition(State); } +// Performs a check for zero-sized allocations. +ProgramStateRef MallocChecker::ProcessZeroAllocation(CheckerContext &C, + const Expr *E, + const unsigned AllocationSizeArg, + ProgramStateRef State) const { + if (!State) + return nullptr; + + const Expr *Arg = nullptr; + + if (const CallExpr *CE = dyn_cast<CallExpr>(E)) { + Arg = CE->getArg(AllocationSizeArg); + } + else if (const CXXNewExpr *NE = dyn_cast<CXXNewExpr>(E)) { + if (NE->isArray()) + Arg = NE->getArraySize(); + else + return State; + } + else + llvm_unreachable("not a CallExpr or CXXNewExpr"); + + assert(Arg); + + Optional<DefinedSVal> DefArgVal = + State->getSVal(Arg, C.getLocationContext()).getAs<DefinedSVal>(); + + if (!DefArgVal) + return State; + + // Check if the allocation size is 0. + ProgramStateRef TrueState, FalseState; + SValBuilder &SvalBuilder = C.getSValBuilder(); + DefinedSVal Zero = + SvalBuilder.makeZeroVal(Arg->getType()).castAs<DefinedSVal>(); + + std::tie(TrueState, FalseState) = + State->assume(SvalBuilder.evalEQ(State, *DefArgVal, Zero)); + + if (TrueState && !FalseState) { + SVal retVal = State->getSVal(E, C.getLocationContext()); + SymbolRef Sym = retVal.getAsLocSymbol(); + if (!Sym) + return State; + + const RefState *RS = State->get<RegionState>(Sym); + if (!RS) + return State; // TODO: change to assert(RS) once realloc() is + // guaranteed to have a RegionState attached. + + if (!RS->isAllocated()) + return State; + + return TrueState->set<RegionState>(Sym, + RefState::getAllocatedOfSizeZero(RS)); + } + + // Assume the value is non-zero going forward. + assert(FalseState); + return FalseState; +} + static QualType getDeepPointeeType(QualType T) { QualType Result = T, PointeeType = T->getPointeeType(); while (!PointeeType.isNull()) { @@ -849,6 +967,7 @@ void MallocChecker::checkPostStmt(const CXXNewExpr *NE, // existing binding. State = MallocUpdateRefState(C, NE, State, NE->isArray() ?
AF_CXXNewArray : AF_CXXNew); + State = ProcessZeroAllocation(C, NE, 0, State); C.addTransition(State); } @@ -919,15 +1038,31 @@ void MallocChecker::checkPostObjCMessage(const ObjCMethodCall &Call, ProgramStateRef MallocChecker::MallocMemReturnsAttr(CheckerContext &C, const CallExpr *CE, - const OwnershipAttr *Att) const { + const OwnershipAttr *Att, + ProgramStateRef State) const { + if (!State) + return nullptr; + if (Att->getModule() != II_malloc) return nullptr; OwnershipAttr::args_iterator I = Att->args_begin(), E = Att->args_end(); if (I != E) { - return MallocMemAux(C, CE, CE->getArg(*I), UndefinedVal(), C.getState()); + return MallocMemAux(C, CE, CE->getArg(*I), UndefinedVal(), State); } - return MallocMemAux(C, CE, UnknownVal(), UndefinedVal(), C.getState()); + return MallocMemAux(C, CE, UnknownVal(), UndefinedVal(), State); +} + +ProgramStateRef MallocChecker::MallocMemAux(CheckerContext &C, + const CallExpr *CE, + const Expr *SizeEx, SVal Init, + ProgramStateRef State, + AllocationFamily Family) { + if (!State) + return nullptr; + + return MallocMemAux(C, CE, State->getSVal(SizeEx, C.getLocationContext()), + Init, State, Family); } ProgramStateRef MallocChecker::MallocMemAux(CheckerContext &C, @@ -935,6 +1070,8 @@ ProgramStateRef MallocChecker::MallocMemAux(CheckerContext &C, SVal Size, SVal Init, ProgramStateRef State, AllocationFamily Family) { + if (!State) + return nullptr; // We expect the malloc functions to return a pointer. if (!Loc::isLocType(CE->getType())) @@ -976,6 +1113,9 @@ ProgramStateRef MallocChecker::MallocUpdateRefState(CheckerContext &C, const Expr *E, ProgramStateRef State, AllocationFamily Family) { + if (!State) + return nullptr; + // Get the return value. SVal retVal = State->getSVal(E, C.getLocationContext()); @@ -992,11 +1132,14 @@ ProgramStateRef MallocChecker::MallocUpdateRefState(CheckerContext &C, ProgramStateRef MallocChecker::FreeMemAttr(CheckerContext &C, const CallExpr *CE, - const OwnershipAttr *Att) const { + const OwnershipAttr *Att, + ProgramStateRef State) const { + if (!State) + return nullptr; + if (Att->getModule() != II_malloc) return nullptr; - ProgramStateRef State = C.getState(); bool ReleasedAllocated = false; for (const auto &Arg : Att->args()) { @@ -1011,15 +1154,18 @@ ProgramStateRef MallocChecker::FreeMemAttr(CheckerContext &C, ProgramStateRef MallocChecker::FreeMemAux(CheckerContext &C, const CallExpr *CE, - ProgramStateRef state, + ProgramStateRef State, unsigned Num, bool Hold, bool &ReleasedAllocated, bool ReturnsNullOnFailure) const { + if (!State) + return nullptr; + if (CE->getNumArgs() < (Num + 1)) return nullptr; - return FreeMemAux(C, CE->getArg(Num), CE, state, Hold, + return FreeMemAux(C, CE->getArg(Num), CE, State, Hold, ReleasedAllocated, ReturnsNullOnFailure); } @@ -1065,6 +1211,9 @@ AllocationFamily MallocChecker::getAllocationFamily(CheckerContext &C, if (isCMemFunction(FD, Ctx, AF_IfNameIndex, MemoryOperationKind::MOK_Any)) return AF_IfNameIndex; + if (isCMemFunction(FD, Ctx, AF_Alloca, MemoryOperationKind::MOK_Any)) + return AF_Alloca; + return AF_None; } @@ -1129,6 +1278,7 @@ void MallocChecker::printExpectedAllocName(raw_ostream &os, CheckerContext &C, case AF_CXXNew: os << "'new'"; return; case AF_CXXNewArray: os << "'new[]'"; return; case AF_IfNameIndex: os << "'if_nameindex()'"; return; + case AF_Alloca: case AF_None: llvm_unreachable("not a deallocation expression"); } } @@ -1140,7 +1290,8 @@ void MallocChecker::printExpectedDeallocName(raw_ostream &os, case AF_CXXNew: os << "'delete'"; return; case 
AF_CXXNewArray: os << "'delete[]'"; return; case AF_IfNameIndex: os << "'if_freenameindex()'"; return; - case AF_None: llvm_unreachable("suspicious AF_None argument"); + case AF_Alloca: + case AF_None: llvm_unreachable("suspicious argument"); } } @@ -1152,6 +1303,9 @@ ProgramStateRef MallocChecker::FreeMemAux(CheckerContext &C, bool &ReleasedAllocated, bool ReturnsNullOnFailure) const { + if (!State) + return nullptr; + SVal ArgVal = State->getSVal(ArgExpr, C.getLocationContext()); if (!ArgVal.getAs<DefinedOrUnknownSVal>()) return nullptr; @@ -1191,8 +1345,8 @@ ProgramStateRef MallocChecker::FreeMemAux(CheckerContext &C, const MemSpaceRegion *MS = R->getMemorySpace(); - // Parameters, locals, statics, globals, and memory returned by alloca() - // shouldn't be freed. + // Parameters, locals, statics, globals, and memory returned by + // __builtin_alloca() shouldn't be freed. if (!(isa<UnknownSpaceRegion>(MS) || isa<HeapSpaceRegion>(MS))) { // FIXME: at the time this code was written, malloc() regions were // represented by conjured symbols, which are all in UnknownSpaceRegion. @@ -1201,8 +1355,12 @@ ProgramStateRef MallocChecker::FreeMemAux(CheckerContext &C, // Of course, free() can work on memory allocated outside the current // function, so UnknownSpaceRegion is always a possibility. // False negatives are better than false positives. - - ReportBadFree(C, ArgVal, ArgExpr->getSourceRange(), ParentExpr); + + if (isa<AllocaRegion>(R)) + ReportFreeAlloca(C, ArgVal, ArgExpr->getSourceRange()); + else + ReportBadFree(C, ArgVal, ArgExpr->getSourceRange(), ParentExpr); + return nullptr; } @@ -1218,6 +1376,12 @@ ProgramStateRef MallocChecker::FreeMemAux(CheckerContext &C, if (RsBase) { + // Memory returned by alloca() shouldn't be freed. + if (RsBase->getAllocationFamily() == AF_Alloca) { + ReportFreeAlloca(C, ArgVal, ArgExpr->getSourceRange()); + return nullptr; + } + // Check for double free first. if ((RsBase->isReleased() || RsBase->isRelinquished()) && !didPreviousFreeFail(State, SymBase, PreviousRetStatusSymbol)) { @@ -1227,7 +1391,8 @@ ProgramStateRef MallocChecker::FreeMemAux(CheckerContext &C, // If the pointer is allocated or escaped, but we are now trying to free it, // check that the call to free is proper. - } else if (RsBase->isAllocated() || RsBase->isEscaped()) { + } else if (RsBase->isAllocated() || RsBase->isAllocatedOfSizeZero() || + RsBase->isEscaped()) { // Check if an expected deallocation function matches the real one. bool DeallocMatchesAlloc = @@ -1252,7 +1417,8 @@ ProgramStateRef MallocChecker::FreeMemAux(CheckerContext &C, } } - ReleasedAllocated = (RsBase != nullptr) && RsBase->isAllocated(); + ReleasedAllocated = (RsBase != nullptr) && (RsBase->isAllocated() || + RsBase->isAllocatedOfSizeZero()); // Clean out the info on previous call to free return info. 
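The new AF_Alloca family and the ReportFreeAlloca path above cover deallocation of stack memory. An illustrative trigger, assuming a Clang/GCC-style environment where __builtin_alloca is available:

#include <cstdlib>

void freeAllocaMemory(unsigned N) {
  int *P = (int *)__builtin_alloca(N * sizeof(int)); // AF_Alloca allocation
  free(P); // "Memory allocated by alloca() should not be deallocated"
}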
State = State->remove<FreeReturnValue>(SymBase); @@ -1281,21 +1447,26 @@ ProgramStateRef MallocChecker::FreeMemAux(CheckerContext &C, } Optional<MallocChecker::CheckKind> -MallocChecker::getCheckIfTracked(AllocationFamily Family) const { +MallocChecker::getCheckIfTracked(AllocationFamily Family, + bool IsALeakCheck) const { switch (Family) { case AF_Malloc: + case AF_Alloca: case AF_IfNameIndex: { - if (ChecksEnabled[CK_MallocOptimistic]) { - return CK_MallocOptimistic; - } else if (ChecksEnabled[CK_MallocPessimistic]) { - return CK_MallocPessimistic; - } + if (ChecksEnabled[CK_MallocChecker]) + return CK_MallocChecker; + return Optional<MallocChecker::CheckKind>(); } case AF_CXXNew: case AF_CXXNewArray: { - if (ChecksEnabled[CK_NewDeleteChecker]) { - return CK_NewDeleteChecker; + if (IsALeakCheck) { + if (ChecksEnabled[CK_NewDeleteLeaksChecker]) + return CK_NewDeleteLeaksChecker; + } + else { + if (ChecksEnabled[CK_NewDeleteChecker]) + return CK_NewDeleteChecker; } return Optional<MallocChecker::CheckKind>(); } @@ -1308,16 +1479,18 @@ MallocChecker::getCheckIfTracked(AllocationFamily Family) const { Optional<MallocChecker::CheckKind> MallocChecker::getCheckIfTracked(CheckerContext &C, - const Stmt *AllocDeallocStmt) const { - return getCheckIfTracked(getAllocationFamily(C, AllocDeallocStmt)); + const Stmt *AllocDeallocStmt, + bool IsALeakCheck) const { + return getCheckIfTracked(getAllocationFamily(C, AllocDeallocStmt), + IsALeakCheck); } Optional<MallocChecker::CheckKind> -MallocChecker::getCheckIfTracked(CheckerContext &C, SymbolRef Sym) const { - +MallocChecker::getCheckIfTracked(CheckerContext &C, SymbolRef Sym, + bool IsALeakCheck) const { const RefState *RS = C.getState()->get<RegionState>(Sym); assert(RS); - return getCheckIfTracked(RS->getAllocationFamily()); + return getCheckIfTracked(RS->getAllocationFamily(), IsALeakCheck); } bool MallocChecker::SummarizeValue(raw_ostream &os, SVal V) { @@ -1411,8 +1584,7 @@ void MallocChecker::ReportBadFree(CheckerContext &C, SVal ArgVal, SourceRange Range, const Expr *DeallocExpr) const { - if (!ChecksEnabled[CK_MallocOptimistic] && - !ChecksEnabled[CK_MallocPessimistic] && + if (!ChecksEnabled[CK_MallocChecker] && !ChecksEnabled[CK_NewDeleteChecker]) return; @@ -1433,23 +1605,19 @@ void MallocChecker::ReportBadFree(CheckerContext &C, SVal ArgVal, while (const ElementRegion *ER = dyn_cast_or_null<ElementRegion>(MR)) MR = ER->getSuperRegion(); - if (MR && isa<AllocaRegion>(MR)) - os << "Memory allocated by alloca() should not be deallocated"; - else { - os << "Argument to "; - if (!printAllocDeallocName(os, C, DeallocExpr)) - os << "deallocator"; - - os << " is "; - bool Summarized = MR ? SummarizeRegion(os, MR) - : SummarizeValue(os, ArgVal); - if (Summarized) - os << ", which is not memory allocated by "; - else - os << "not memory allocated by "; + os << "Argument to "; + if (!printAllocDeallocName(os, C, DeallocExpr)) + os << "deallocator"; - printExpectedAllocName(os, C, DeallocExpr); - } + os << " is "; + bool Summarized = MR ? 
SummarizeRegion(os, MR) + : SummarizeValue(os, ArgVal); + if (Summarized) + os << ", which is not memory allocated by "; + else + os << "not memory allocated by "; + + printExpectedAllocName(os, C, DeallocExpr); BugReport *R = new BugReport(*BT_BadFree[*CheckKind], os.str(), N); R->markInteresting(MR); @@ -1458,6 +1626,31 @@ void MallocChecker::ReportBadFree(CheckerContext &C, SVal ArgVal, } } +void MallocChecker::ReportFreeAlloca(CheckerContext &C, SVal ArgVal, + SourceRange Range) const { + + Optional<MallocChecker::CheckKind> CheckKind; + + if (ChecksEnabled[CK_MallocChecker]) + CheckKind = CK_MallocChecker; + else if (ChecksEnabled[CK_MismatchedDeallocatorChecker]) + CheckKind = CK_MismatchedDeallocatorChecker; + else + return; + + if (ExplodedNode *N = C.generateSink()) { + if (!BT_FreeAlloca[*CheckKind]) + BT_FreeAlloca[*CheckKind].reset( + new BugType(CheckNames[*CheckKind], "Free alloca()", "Memory Error")); + + BugReport *R = new BugReport(*BT_FreeAlloca[*CheckKind], + "Memory allocated by alloca() should not be deallocated", N); + R->markInteresting(ArgVal.getAsRegion()); + R->addRange(Range); + C.emitReport(R); + } +} + void MallocChecker::ReportMismatchedDealloc(CheckerContext &C, SourceRange Range, const Expr *DeallocExpr, @@ -1517,8 +1710,8 @@ void MallocChecker::ReportOffsetFree(CheckerContext &C, SVal ArgVal, SourceRange Range, const Expr *DeallocExpr, const Expr *AllocExpr) const { - if (!ChecksEnabled[CK_MallocOptimistic] && - !ChecksEnabled[CK_MallocPessimistic] && + + if (!ChecksEnabled[CK_MallocChecker] && !ChecksEnabled[CK_NewDeleteChecker]) return; @@ -1573,8 +1766,7 @@ void MallocChecker::ReportOffsetFree(CheckerContext &C, SVal ArgVal, void MallocChecker::ReportUseAfterFree(CheckerContext &C, SourceRange Range, SymbolRef Sym) const { - if (!ChecksEnabled[CK_MallocOptimistic] && - !ChecksEnabled[CK_MallocPessimistic] && + if (!ChecksEnabled[CK_MallocChecker] && !ChecksEnabled[CK_NewDeleteChecker]) return; @@ -1601,8 +1793,7 @@ void MallocChecker::ReportDoubleFree(CheckerContext &C, SourceRange Range, bool Released, SymbolRef Sym, SymbolRef PrevSym) const { - if (!ChecksEnabled[CK_MallocOptimistic] && - !ChecksEnabled[CK_MallocPessimistic] && + if (!ChecksEnabled[CK_MallocChecker] && !ChecksEnabled[CK_NewDeleteChecker]) return; @@ -1637,7 +1828,6 @@ void MallocChecker::ReportDoubleDelete(CheckerContext &C, SymbolRef Sym) const { Optional<MallocChecker::CheckKind> CheckKind = getCheckIfTracked(C, Sym); if (!CheckKind.hasValue()) return; - assert(*CheckKind == CK_NewDeleteChecker && "invalid check kind"); if (ExplodedNode *N = C.generateSink()) { if (!BT_DoubleDelete) @@ -1653,16 +1843,49 @@ void MallocChecker::ReportDoubleDelete(CheckerContext &C, SymbolRef Sym) const { } } +void MallocChecker::ReportUseZeroAllocated(CheckerContext &C, + SourceRange Range, + SymbolRef Sym) const { + + if (!ChecksEnabled[CK_MallocChecker] && + !ChecksEnabled[CK_NewDeleteChecker]) + return; + + Optional<MallocChecker::CheckKind> CheckKind = getCheckIfTracked(C, Sym); + + if (!CheckKind.hasValue()) + return; + + if (ExplodedNode *N = C.generateSink()) { + if (!BT_UseZerroAllocated[*CheckKind]) + BT_UseZerroAllocated[*CheckKind].reset(new BugType( + CheckNames[*CheckKind], "Use of zero allocated", "Memory Error")); + + BugReport *R = new BugReport(*BT_UseZerroAllocated[*CheckKind], + "Use of zero-allocated memory", N); + + R->addRange(Range); + if (Sym) { + R->markInteresting(Sym); + R->addVisitor(llvm::make_unique<MallocBugVisitor>(Sym)); + } + C.emitReport(R); + } +} + ProgramStateRef 
MallocChecker::ReallocMem(CheckerContext &C, const CallExpr *CE, - bool FreesOnFail) const { + bool FreesOnFail, + ProgramStateRef State) const { + if (!State) + return nullptr; + if (CE->getNumArgs() < 2) return nullptr; - ProgramStateRef state = C.getState(); const Expr *arg0Expr = CE->getArg(0); const LocationContext *LCtx = C.getLocationContext(); - SVal Arg0Val = state->getSVal(arg0Expr, LCtx); + SVal Arg0Val = State->getSVal(arg0Expr, LCtx); if (!Arg0Val.getAs<DefinedOrUnknownSVal>()) return nullptr; DefinedOrUnknownSVal arg0Val = Arg0Val.castAs<DefinedOrUnknownSVal>(); @@ -1670,7 +1893,7 @@ ProgramStateRef MallocChecker::ReallocMem(CheckerContext &C, SValBuilder &svalBuilder = C.getSValBuilder(); DefinedOrUnknownSVal PtrEQ = - svalBuilder.evalEQ(state, arg0Val, svalBuilder.makeNull()); + svalBuilder.evalEQ(State, arg0Val, svalBuilder.makeNull()); // Get the size argument. If there is no size arg then give up. const Expr *Arg1 = CE->getArg(1); @@ -1678,20 +1901,20 @@ ProgramStateRef MallocChecker::ReallocMem(CheckerContext &C, return nullptr; // Get the value of the size argument. - SVal Arg1ValG = state->getSVal(Arg1, LCtx); + SVal Arg1ValG = State->getSVal(Arg1, LCtx); if (!Arg1ValG.getAs<DefinedOrUnknownSVal>()) return nullptr; DefinedOrUnknownSVal Arg1Val = Arg1ValG.castAs<DefinedOrUnknownSVal>(); // Compare the size argument to 0. DefinedOrUnknownSVal SizeZero = - svalBuilder.evalEQ(state, Arg1Val, + svalBuilder.evalEQ(State, Arg1Val, svalBuilder.makeIntValWithPtrWidth(0, false)); ProgramStateRef StatePtrIsNull, StatePtrNotNull; - std::tie(StatePtrIsNull, StatePtrNotNull) = state->assume(PtrEQ); + std::tie(StatePtrIsNull, StatePtrNotNull) = State->assume(PtrEQ); ProgramStateRef StateSizeIsZero, StateSizeNotZero; - std::tie(StateSizeIsZero, StateSizeNotZero) = state->assume(SizeZero); + std::tie(StateSizeIsZero, StateSizeNotZero) = State->assume(SizeZero); // We only assume exceptional states if they are definitely true; if the // state is under-constrained, assume regular realloc behavior. bool PrtIsNull = StatePtrIsNull && !StatePtrNotNull; @@ -1711,7 +1934,7 @@ ProgramStateRef MallocChecker::ReallocMem(CheckerContext &C, // Get the from and to pointer symbols as in toPtr = realloc(fromPtr, size). assert(!PrtIsNull); SymbolRef FromPtr = arg0Val.getAsSymbol(); - SVal RetVal = state->getSVal(CE, LCtx); + SVal RetVal = State->getSVal(CE, LCtx); SymbolRef ToPtr = RetVal.getAsSymbol(); if (!FromPtr || !ToPtr) return nullptr; @@ -1731,7 +1954,7 @@ ProgramStateRef MallocChecker::ReallocMem(CheckerContext &C, // Default behavior. 
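The two exceptional states ReallocMem separates above correspond to the classic realloc corner cases; a sketch of the call patterns being modeled (how a given libc actually behaves on a zero size is implementation-defined):

#include <cstddef>
#include <cstdlib>

void reallocCornerCases(int *P, size_t N) {
  int *A = (int *)realloc(nullptr, N); // null pointer: modeled like malloc(N)
  int *B = (int *)realloc(P, 0);       // zero size: modeled like free(P),
                                       // with the result tracked separately
  free(A);
  free(B);
}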
if (ProgramStateRef stateFree = - FreeMemAux(C, CE, state, 0, false, ReleasedAllocated)) { + FreeMemAux(C, CE, State, 0, false, ReleasedAllocated)) { ProgramStateRef stateRealloc = MallocMemAux(C, CE, CE->getArg(1), UnknownVal(), stateFree); @@ -1755,20 +1978,23 @@ ProgramStateRef MallocChecker::ReallocMem(CheckerContext &C, return nullptr; } -ProgramStateRef MallocChecker::CallocMem(CheckerContext &C, const CallExpr *CE){ +ProgramStateRef MallocChecker::CallocMem(CheckerContext &C, const CallExpr *CE, + ProgramStateRef State) { + if (!State) + return nullptr; + if (CE->getNumArgs() < 2) return nullptr; - ProgramStateRef state = C.getState(); SValBuilder &svalBuilder = C.getSValBuilder(); const LocationContext *LCtx = C.getLocationContext(); - SVal count = state->getSVal(CE->getArg(0), LCtx); - SVal elementSize = state->getSVal(CE->getArg(1), LCtx); - SVal TotalSize = svalBuilder.evalBinOp(state, BO_Mul, count, elementSize, + SVal count = State->getSVal(CE->getArg(0), LCtx); + SVal elementSize = State->getSVal(CE->getArg(1), LCtx); + SVal TotalSize = svalBuilder.evalBinOp(State, BO_Mul, count, elementSize, svalBuilder.getContext().getSizeType()); SVal zeroVal = svalBuilder.makeZeroVal(svalBuilder.getContext().CharTy); - return MallocMemAux(C, CE, TotalSize, zeroVal, state); + return MallocMemAux(C, CE, TotalSize, zeroVal, State); } LeakInfo @@ -1801,9 +2027,11 @@ MallocChecker::getAllocationSite(const ExplodedNode *N, SymbolRef Sym, } } - // Allocation node, is the last node in the current context in which the - // symbol was tracked. - if (N->getLocationContext() == LeakContext) + // The allocation node is the last node in the current or parent context + // in which the symbol was tracked. + const LocationContext *NContext = N->getLocationContext(); + if (NContext == LeakContext || + NContext->isParentOf(LeakContext)) AllocNode = N; N = N->pred_empty() ? nullptr : *(N->pred_begin()); } @@ -1814,23 +2042,22 @@ MallocChecker::getAllocationSite(const ExplodedNode *N, SymbolRef Sym, void MallocChecker::reportLeak(SymbolRef Sym, ExplodedNode *N, CheckerContext &C) const { - if (!ChecksEnabled[CK_MallocOptimistic] && - !ChecksEnabled[CK_MallocPessimistic] && + if (!ChecksEnabled[CK_MallocChecker] && !ChecksEnabled[CK_NewDeleteLeaksChecker]) return; const RefState *RS = C.getState()->get<RegionState>(Sym); assert(RS && "cannot leak an untracked symbol"); AllocationFamily Family = RS->getAllocationFamily(); - Optional<MallocChecker::CheckKind> CheckKind = getCheckIfTracked(Family); - if (!CheckKind.hasValue()) + + if (Family == AF_Alloca) return; - // Special case for new and new[]; these are controlled by a separate checker - // flag so that they can be selectively disabled. - if (Family == AF_CXXNew || Family == AF_CXXNewArray) - if (!ChecksEnabled[CK_NewDeleteLeaksChecker]) - return; + Optional<MallocChecker::CheckKind> + CheckKind = getCheckIfTracked(Family, true); + + if (!CheckKind.hasValue()) + return; assert(N); if (!BT_Leak[*CheckKind]) { @@ -1893,7 +2120,7 @@ void MallocChecker::checkDeadSymbols(SymbolReaper &SymReaper, SmallVector<SymbolRef, 2> Errors; for (RegionStateTy::iterator I = RS.begin(), E = RS.end(); I != E; ++I) { if (SymReaper.isDead(I->first)) { - if (I->second.isAllocated()) + if (I->second.isAllocated() || I->second.isAllocatedOfSizeZero()) Errors.push_back(I->first); // Remove the dead symbol from the map.
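Given the checkDeadSymbols change above, a zero-sized allocation that goes out of scope unreleased is reported just like any other leak; an illustrative example, not part of the patch:

#include <cstdlib>

void leakZeroSized() {
  void *P = malloc(0); // tracked as AllocatedOfSizeZero
  (void)P;
} // P's symbol dies here, so a leak is reported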
RS = F.remove(RS, I->first); @@ -1949,8 +2176,7 @@ void MallocChecker::checkPreCall(const CallEvent &Call, return; ASTContext &Ctx = C.getASTContext(); - if ((ChecksEnabled[CK_MallocOptimistic] || - ChecksEnabled[CK_MallocPessimistic]) && + if (ChecksEnabled[CK_MallocChecker] && (isCMemFunction(FD, Ctx, AF_Malloc, MemoryOperationKind::MOK_Free) || isCMemFunction(FD, Ctx, AF_IfNameIndex, MemoryOperationKind::MOK_Free))) @@ -2062,6 +2288,15 @@ bool MallocChecker::checkUseAfterFree(SymbolRef Sym, CheckerContext &C, return false; } +void MallocChecker::checkUseZeroAllocated(SymbolRef Sym, CheckerContext &C, + const Stmt *S) const { + assert(Sym); + const RefState *RS = C.getState()->get<RegionState>(Sym); + + if (RS && RS->isAllocatedOfSizeZero()) + ReportUseZeroAllocated(C, RS->getStmt()->getSourceRange(), Sym); +} + bool MallocChecker::checkDoubleDelete(SymbolRef Sym, CheckerContext &C) const { if (isReleased(Sym, C)) { @@ -2075,8 +2310,10 @@ bool MallocChecker::checkDoubleDelete(SymbolRef Sym, CheckerContext &C) const { void MallocChecker::checkLocation(SVal l, bool isLoad, const Stmt *S, CheckerContext &C) const { SymbolRef Sym = l.getLocSymbolInBase(); - if (Sym) + if (Sym) { checkUseAfterFree(Sym, C, S); + checkUseZeroAllocated(Sym, C, S); + } } // If a symbolic region is assumed to NULL (or another constant), stop tracking @@ -2320,7 +2557,8 @@ ProgramStateRef MallocChecker::checkPointerEscapeAux(ProgramStateRef State, continue; if (const RefState *RS = State->get<RegionState>(sym)) { - if (RS->isAllocated() && CheckRefState(RS)) { + if ((RS->isAllocated() || RS->isAllocatedOfSizeZero()) && + CheckRefState(RS)) { State = State->remove<RegionState>(sym); State = State->set<RegionState>(sym, RefState::getEscaped(RS)); } @@ -2443,6 +2681,8 @@ void MallocChecker::printState(raw_ostream &Out, ProgramStateRef State, const RefState *RefS = State->get<RegionState>(I.getKey()); AllocationFamily Family = RefS->getAllocationFamily(); Optional<MallocChecker::CheckKind> CheckKind = getCheckIfTracked(Family); + if (!CheckKind.hasValue()) + CheckKind = getCheckIfTracked(Family, true); I.getKey()->dumpToStream(Out); Out << " : "; @@ -2457,6 +2697,8 @@ void MallocChecker::printState(raw_ostream &Out, ProgramStateRef State, void ento::registerNewDeleteLeaksChecker(CheckerManager &mgr) { registerCStringCheckerBasic(mgr); MallocChecker *checker = mgr.registerChecker<MallocChecker>(); + checker->IsOptimistic = mgr.getAnalyzerOptions().getBooleanOption( + "Optimistic", false, checker); checker->ChecksEnabled[MallocChecker::CK_NewDeleteLeaksChecker] = true; checker->CheckNames[MallocChecker::CK_NewDeleteLeaksChecker] = mgr.getCurrentCheckName(); @@ -2470,11 +2712,12 @@ void ento::registerNewDeleteLeaksChecker(CheckerManager &mgr) { void ento::register##name(CheckerManager &mgr) { \ registerCStringCheckerBasic(mgr); \ MallocChecker *checker = mgr.registerChecker<MallocChecker>(); \ + checker->IsOptimistic = mgr.getAnalyzerOptions().getBooleanOption( \ + "Optimistic", false, checker); \ checker->ChecksEnabled[MallocChecker::CK_##name] = true; \ checker->CheckNames[MallocChecker::CK_##name] = mgr.getCurrentCheckName(); \ } -REGISTER_CHECKER(MallocPessimistic) -REGISTER_CHECKER(MallocOptimistic) +REGISTER_CHECKER(MallocChecker) REGISTER_CHECKER(NewDeleteChecker) REGISTER_CHECKER(MismatchedDeallocatorChecker) diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocOverflowSecurityChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocOverflowSecurityChecker.cpp index 
f38ce77..e913479 100644 --- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocOverflowSecurityChecker.cpp +++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocOverflowSecurityChecker.cpp @@ -142,13 +142,13 @@ private: } } } - else if (isa<MemberExpr>(E)) { + else if (const auto *ME = dyn_cast<MemberExpr>(E)) { // No points-to analysis, just look at the member - const Decl * EmeMD = dyn_cast<MemberExpr>(E)->getMemberDecl(); + const Decl *EmeMD = ME->getMemberDecl(); while (i != e) { --i; - if (isa<MemberExpr>(i->variable)) { - if (dyn_cast<MemberExpr>(i->variable)->getMemberDecl() == EmeMD) + if (const auto *ME_i = dyn_cast<MemberExpr>(i->variable)) { + if (ME_i->getMemberDecl() == EmeMD) i = toScanFor.erase (i); } } diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp index 9a460ba..58c27d4 100644 --- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp +++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp @@ -94,6 +94,16 @@ public: ErrorReturnedNotOwned }; + /// Tracks how an object referenced by an ivar has been used. + /// + /// This accounts for us not knowing if an arbitrary ivar is supposed to be + /// stored at +0 or +1. + enum class IvarAccessHistory { + None, + AccessedDirectly, + ReleasedAfterDirectAccess + }; + private: /// The number of outstanding retains. unsigned Cnt; @@ -121,14 +131,16 @@ private: /// This setting should not be propagated to state derived from this state. /// Once we start deriving new states, it would be inconsistent to override /// them. - unsigned IsOverridable : 1; + unsigned RawIvarAccessHistory : 2; RefVal(Kind k, RetEffect::ObjKind o, unsigned cnt, unsigned acnt, QualType t, - bool Overridable = false) + IvarAccessHistory IvarAccess) : Cnt(cnt), ACnt(acnt), T(t), RawKind(static_cast<unsigned>(k)), - RawObjectKind(static_cast<unsigned>(o)), IsOverridable(Overridable) { + RawObjectKind(static_cast<unsigned>(o)), + RawIvarAccessHistory(static_cast<unsigned>(IvarAccess)) { assert(getKind() == k && "not enough bits for the kind"); assert(getObjKind() == o && "not enough bits for the object kind"); + assert(getIvarAccessHistory() == IvarAccess && "not enough bits"); } public: @@ -144,20 +156,24 @@ public: void clearCounts() { Cnt = 0; ACnt = 0; - IsOverridable = false; } void setCount(unsigned i) { Cnt = i; - IsOverridable = false; } void setAutoreleaseCount(unsigned i) { ACnt = i; - IsOverridable = false; } QualType getType() const { return T; } - bool isOverridable() const { return IsOverridable; } + /// Returns what the analyzer knows about direct accesses to a particular + /// instance variable. + /// + /// If the object with this refcount wasn't originally from an Objective-C + /// ivar region, this should always return IvarAccessHistory::None. + IvarAccessHistory getIvarAccessHistory() const { + return static_cast<IvarAccessHistory>(RawIvarAccessHistory); + } bool isOwned() const { return getKind() == Owned; @@ -181,7 +197,7 @@ public: /// Most commonly, this is an owned object with a retain count of +1. static RefVal makeOwned(RetEffect::ObjKind o, QualType t, unsigned Count = 1) { - return RefVal(Owned, o, Count, 0, t); + return RefVal(Owned, o, Count, 0, t, IvarAccessHistory::None); } /// Create a state for an object whose lifetime is not the responsibility of @@ -190,47 +206,49 @@ public: /// Most commonly, this is an unowned object with a retain count of +0. 
static RefVal makeNotOwned(RetEffect::ObjKind o, QualType t, unsigned Count = 0) { - return RefVal(NotOwned, o, Count, 0, t); - } - - /// Create an "overridable" state for an unowned object at +0. - /// - /// An overridable state is one that provides a good approximation of the - /// reference counting state now, but which may be discarded later if the - /// checker sees the object being used in new ways. - static RefVal makeOverridableNotOwned(RetEffect::ObjKind o, QualType t) { - return RefVal(NotOwned, o, 0, 0, t, /*Overridable=*/true); + return RefVal(NotOwned, o, Count, 0, t, IvarAccessHistory::None); } RefVal operator-(size_t i) const { return RefVal(getKind(), getObjKind(), getCount() - i, - getAutoreleaseCount(), getType()); + getAutoreleaseCount(), getType(), getIvarAccessHistory()); } RefVal operator+(size_t i) const { return RefVal(getKind(), getObjKind(), getCount() + i, - getAutoreleaseCount(), getType()); + getAutoreleaseCount(), getType(), getIvarAccessHistory()); } RefVal operator^(Kind k) const { return RefVal(k, getObjKind(), getCount(), getAutoreleaseCount(), - getType()); + getType(), getIvarAccessHistory()); } RefVal autorelease() const { return RefVal(getKind(), getObjKind(), getCount(), getAutoreleaseCount()+1, - getType()); + getType(), getIvarAccessHistory()); + } + + RefVal withIvarAccess() const { + assert(getIvarAccessHistory() == IvarAccessHistory::None); + return RefVal(getKind(), getObjKind(), getCount(), getAutoreleaseCount(), + getType(), IvarAccessHistory::AccessedDirectly); + } + RefVal releaseViaIvar() const { + assert(getIvarAccessHistory() == IvarAccessHistory::AccessedDirectly); + return RefVal(getKind(), getObjKind(), getCount(), getAutoreleaseCount(), + getType(), IvarAccessHistory::ReleasedAfterDirectAccess); } // Comparison, profiling, and pretty-printing. 
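The RefVal changes above thread the ivar-access history through every state transition by packing an enum class into a two-bit unsigned bitfield and asserting in the constructor that the value survives the round trip. A standalone sketch of that pattern, with invented names:

#include <cassert>

enum class AccessHistory { None, AccessedDirectly, ReleasedAfterDirectAccess };

struct PackedState {
  unsigned RawHistory : 2; // wide enough for all three enumerators

  explicit PackedState(AccessHistory H)
      : RawHistory(static_cast<unsigned>(H)) {
    // Mirrors the "not enough bits" assertion: if the bitfield were too
    // narrow, the truncation would be caught here rather than silently.
    assert(getHistory() == H && "not enough bits");
  }

  AccessHistory getHistory() const {
    return static_cast<AccessHistory>(RawHistory);
  }
};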
bool hasSameState(const RefVal &X) const { - return getKind() == X.getKind() && Cnt == X.Cnt && ACnt == X.ACnt; + return getKind() == X.getKind() && Cnt == X.Cnt && ACnt == X.ACnt && + getIvarAccessHistory() == X.getIvarAccessHistory(); } bool operator==(const RefVal& X) const { - return T == X.T && hasSameState(X) && getObjKind() == X.getObjKind() && - IsOverridable == X.IsOverridable; + return T == X.T && hasSameState(X) && getObjKind() == X.getObjKind(); } void Profile(llvm::FoldingSetNodeID& ID) const { @@ -239,7 +257,7 @@ public: ID.AddInteger(Cnt); ID.AddInteger(ACnt); ID.AddInteger(RawObjectKind); - ID.AddBoolean(IsOverridable); + ID.AddInteger(RawIvarAccessHistory); } void print(raw_ostream &Out) const; @@ -249,9 +267,6 @@ void RefVal::print(raw_ostream &Out) const { if (!T.isNull()) Out << "Tracked " << T.getAsString() << '/'; - if (isOverridable()) - Out << "(overridable) "; - switch (getKind()) { default: llvm_unreachable("Invalid RefVal kind"); case Owned: { @@ -323,8 +338,18 @@ void RefVal::print(raw_ostream &Out) const { break; } + switch (getIvarAccessHistory()) { + case IvarAccessHistory::None: + break; + case IvarAccessHistory::AccessedDirectly: + Out << " [direct ivar access]"; + break; + case IvarAccessHistory::ReleasedAfterDirectAccess: + Out << " [released after direct ivar access]"; + } + if (ACnt) { - Out << " [ARC +" << ACnt << ']'; + Out << " [autorelease -" << ACnt << ']'; } } } //end anonymous namespace @@ -1763,12 +1788,11 @@ namespace { addGCModeDescription(LOpts, GCEnabled); } - std::pair<ranges_iterator, ranges_iterator> getRanges() override { + llvm::iterator_range<ranges_iterator> getRanges() override { const CFRefBug& BugTy = static_cast<CFRefBug&>(getBugType()); if (!BugTy.isLeak()) return BugReport::getRanges(); - else - return std::make_pair(ranges_iterator(), ranges_iterator()); + return llvm::make_range(ranges_iterator(), ranges_iterator()); } }; @@ -1829,6 +1853,16 @@ static bool isNumericLiteralExpression(const Expr *E) { isa<CXXBoolLiteralExpr>(E); } +/// Returns true if this stack frame is for an Objective-C method that is a +/// property getter or setter whose body has been synthesized by the analyzer. +static bool isSynthesizedAccessor(const StackFrameContext *SFC) { + auto Method = dyn_cast_or_null<ObjCMethodDecl>(SFC->getDecl()); + if (!Method || !Method->isPropertyAccessor()) + return false; + + return SFC->getAnalysisDeclContext()->isBodyAutosynthesized(); +} + PathDiagnosticPiece *CFRefReportVisitor::VisitNode(const ExplodedNode *N, const ExplodedNode *PrevN, BugReporterContext &BRC, @@ -1859,6 +1893,11 @@ PathDiagnosticPiece *CFRefReportVisitor::VisitNode(const ExplodedNode *N, if (!PrevT) { const Stmt *S = N->getLocation().castAs<StmtPoint>().getStmt(); + if (isa<ObjCIvarRefExpr>(S) && + isSynthesizedAccessor(LCtx->getCurrentStackFrame())) { + S = LCtx->getCurrentStackFrame()->getCallSite(); + } + if (isa<ObjCArrayLiteral>(S)) { os << "NSArray literal is an object with a +0 retain count"; } @@ -1883,6 +1922,9 @@ PathDiagnosticPiece *CFRefReportVisitor::VisitNode(const ExplodedNode *N, os << "oxed expression produces an object with a +0 retain count"; } } + else if (isa<ObjCIvarRefExpr>(S)) { + os << "Object loaded from instance variable"; + } else { if (const CallExpr *CE = dyn_cast<CallExpr>(S)) { // Get the name of the callee (if it is available). 
@@ -2034,7 +2076,6 @@ PathDiagnosticPiece *CFRefReportVisitor::VisitNode(const ExplodedNode *N, switch (CurrV.getKind()) { case RefVal::Owned: case RefVal::NotOwned: - if (PrevV.getCount() == CurrV.getCount()) { // Did an autorelease message get sent? if (PrevV.getAutoreleaseCount() == CurrV.getAutoreleaseCount()) @@ -2062,6 +2103,11 @@ PathDiagnosticPiece *CFRefReportVisitor::VisitNode(const ExplodedNode *N, break; case RefVal::Released: + if (CurrV.getIvarAccessHistory() == + RefVal::IvarAccessHistory::ReleasedAfterDirectAccess && + CurrV.getIvarAccessHistory() != PrevV.getIvarAccessHistory()) { + os << "Strong instance variable relinquished. "; + } os << "Object released."; break; @@ -2143,7 +2189,7 @@ static AllocationInfo GetAllocationSite(ProgramStateManager& StateMgr, const ExplodedNode *N, SymbolRef Sym) { const ExplodedNode *AllocationNode = N; - const ExplodedNode *AllocationNodeInCurrentContext = N; + const ExplodedNode *AllocationNodeInCurrentOrParentContext = N; const MemRegion *FirstBinding = nullptr; const LocationContext *LeakContext = N->getLocationContext(); @@ -2173,10 +2219,15 @@ GetAllocationSite(ProgramStateManager& StateMgr, const ExplodedNode *N, // AllocationNode is the last node in which the symbol was tracked. AllocationNode = N; - // AllocationNodeInCurrentContext, is the last node in the current context - // in which the symbol was tracked. - if (NContext == LeakContext) - AllocationNodeInCurrentContext = N; + // AllocationNodeInCurrentOrParentContext is the last node in the current + // or parent context in which the symbol was tracked. + // + // Note that the allocation site might be in the parent context. For + // example, an allocation can happen in a block that captures a reference + // to it, and that reference can be overwritten/dropped by another call to + // the block. + if (NContext == LeakContext || NContext->isParentOf(LeakContext)) + AllocationNodeInCurrentOrParentContext = N; // Find the last init that was called on the given symbol and store the // init method's location context. @@ -2214,7 +2265,7 @@ GetAllocationSite(ProgramStateManager& StateMgr, const ExplodedNode *N, FirstBinding = nullptr; } - return AllocationInfo(AllocationNodeInCurrentContext, + return AllocationInfo(AllocationNodeInCurrentOrParentContext, FirstBinding, InterestingMethodContext); } @@ -2345,20 +2396,8 @@ CFRefLeakReport::CFRefLeakReport(CFRefBug &D, const LangOptions &LOpts, ProgramPoint P = AllocNode->getLocation(); if (Optional<CallExitEnd> Exit = P.getAs<CallExitEnd>()) AllocStmt = Exit->getCalleeContext()->getCallSite(); - else { // We are going to get a BlockEdge when the leak and allocation happen in - // different, non-nested frames (contexts). For example, the case where an - // allocation happens in a block that captures a reference to it and - // that reference is overwritten/dropped by another call to the block.
- if (Optional<BlockEdge> Edge = P.getAs<BlockEdge>()) { - if (Optional<CFGStmt> St = Edge->getDst()->front().getAs<CFGStmt>()) { - AllocStmt = St->getStmt(); - } - } - else { - AllocStmt = P.castAs<PostStmt>().getStmt(); - } - } + else + AllocStmt = P.castAs<PostStmt>().getStmt(); assert(AllocStmt && "Cannot find allocation statement"); PathDiagnosticLocation AllocLocation = @@ -2436,9 +2475,7 @@ public: : ShouldResetSummaryLog(false), IncludeAllocationLine(shouldIncludeAllocationSiteInLeakDiagnostics(AO)) {} - virtual ~RetainCountChecker() { - DeleteContainerSeconds(DeadSymbolTags); - } + ~RetainCountChecker() override { DeleteContainerSeconds(DeadSymbolTags); } void checkEndAnalysis(ExplodedGraph &G, BugReporter &BR, ExprEngine &Eng) const { @@ -2774,17 +2811,64 @@ void RetainCountChecker::checkPostStmt(const ObjCBoxedExpr *Ex, C.addTransition(State); } +static bool wasLoadedFromIvar(SymbolRef Sym) { + if (auto DerivedVal = dyn_cast<SymbolDerived>(Sym)) + return isa<ObjCIvarRegion>(DerivedVal->getRegion()); + if (auto RegionVal = dyn_cast<SymbolRegionValue>(Sym)) + return isa<ObjCIvarRegion>(RegionVal->getRegion()); + return false; +} + void RetainCountChecker::checkPostStmt(const ObjCIvarRefExpr *IRE, CheckerContext &C) const { + Optional<Loc> IVarLoc = C.getSVal(IRE).getAs<Loc>(); + if (!IVarLoc) + return; + ProgramStateRef State = C.getState(); - // If an instance variable was previously accessed through a property, - // it may have a synthesized refcount of +0. Override right now that we're - // doing direct access. - if (Optional<Loc> IVarLoc = C.getSVal(IRE).getAs<Loc>()) - if (SymbolRef Sym = State->getSVal(*IVarLoc).getAsSymbol()) - if (const RefVal *RV = getRefBinding(State, Sym)) - if (RV->isOverridable()) - State = removeRefBinding(State, Sym); + SymbolRef Sym = State->getSVal(*IVarLoc).getAsSymbol(); + if (!Sym || !wasLoadedFromIvar(Sym)) + return; + + // Accessing an ivar directly is unusual. If we've done that, be more + // forgiving about what the surrounding code is allowed to do. + + QualType Ty = Sym->getType(); + RetEffect::ObjKind Kind; + if (Ty->isObjCRetainableType()) + Kind = RetEffect::ObjC; + else if (coreFoundation::isCFObjectRef(Ty)) + Kind = RetEffect::CF; + else + return; + + // If the value is already known to be nil, don't bother tracking it. + ConstraintManager &CMgr = State->getConstraintManager(); + if (CMgr.isNull(State, Sym).isConstrainedTrue()) + return; + + if (const RefVal *RV = getRefBinding(State, Sym)) { + // If we've seen this symbol before, or we're only seeing it now because + // of something the analyzer has synthesized, don't do anything. + if (RV->getIvarAccessHistory() != RefVal::IvarAccessHistory::None || + isSynthesizedAccessor(C.getStackFrame())) { + return; + } + + // Note that this value has been loaded from an ivar. + C.addTransition(setRefBinding(State, Sym, RV->withIvarAccess())); + return; + } + + RefVal PlusZero = RefVal::makeNotOwned(Kind, Ty); + + // In a synthesized accessor, the effective retain count is +0. 
+ if (isSynthesizedAccessor(C.getStackFrame())) { + C.addTransition(setRefBinding(State, Sym, PlusZero)); + return; + } + + State = setRefBinding(State, Sym, PlusZero.withIvarAccess()); C.addTransition(State); } @@ -2828,16 +2912,6 @@ static QualType GetReturnType(const Expr *RetE, ASTContext &Ctx) { return RetTy; } -static bool wasSynthesizedProperty(const ObjCMethodCall *Call, - ExplodedNode *N) { - if (!Call || !Call->getDecl()->isPropertyAccessor()) - return false; - - CallExitEnd PP = N->getLocation().castAs<CallExitEnd>(); - const StackFrameContext *Frame = PP.getCalleeContext(); - return Frame->getAnalysisDeclContext()->isBodyAutosynthesized(); -} - // We don't always get the exact modeling of the function with regards to the // retain count checker even when the function is inlined. For example, we need // to stop tracking the symbols which were marked with StopTrackingHard. @@ -2872,19 +2946,6 @@ void RetainCountChecker::processSummaryOfInlined(const RetainSummary &Summ, SymbolRef Sym = CallOrMsg.getReturnValue().getAsSymbol(); if (Sym) state = removeRefBinding(state, Sym); - } else if (RE.getKind() == RetEffect::NotOwnedSymbol) { - if (wasSynthesizedProperty(MsgInvocation, C.getPredecessor())) { - // Believe the summary if we synthesized the body of a property getter - // and the return value is currently untracked. If the corresponding - // instance variable is later accessed directly, however, we're going to - // want to override this state, so that the owning object can perform - // reference counting operations on its own ivars. - SymbolRef Sym = CallOrMsg.getReturnValue().getAsSymbol(); - if (Sym && !getRefBinding(state, Sym)) - state = setRefBinding(state, Sym, - RefVal::makeOverridableNotOwned(RE.getObjKind(), - Sym->getType())); - } } C.addTransition(state); @@ -3125,11 +3186,16 @@ RetainCountChecker::updateSymbol(ProgramStateRef state, SymbolRef sym, case RefVal::Owned: assert(V.getCount() > 0); - if (V.getCount() == 1) - V = V ^ (E == DecRefBridgedTransferred ? RefVal::NotOwned - : RefVal::Released); - else if (E == DecRefAndStopTrackingHard) + if (V.getCount() == 1) { + if (E == DecRefBridgedTransferred || + V.getIvarAccessHistory() == + RefVal::IvarAccessHistory::AccessedDirectly) + V = V ^ RefVal::NotOwned; + else + V = V ^ RefVal::Released; + } else if (E == DecRefAndStopTrackingHard) { return removeRefBinding(state, sym); + } V = V - 1; break; @@ -3139,6 +3205,13 @@ RetainCountChecker::updateSymbol(ProgramStateRef state, SymbolRef sym, if (E == DecRefAndStopTrackingHard) return removeRefBinding(state, sym); V = V - 1; + } else if (V.getIvarAccessHistory() == + RefVal::IvarAccessHistory::AccessedDirectly) { + // Assume that the instance variable was holding on to the object at + // +1, and we just didn't know.
+ if (E == DecRefAndStopTrackingHard) + return removeRefBinding(state, sym); + V = V.releaseViaIvar() ^ RefVal::Released; } else { V = V ^ RefVal::ErrorReleaseNotOwned; hasErr = V.getKind(); @@ -3162,6 +3235,16 @@ void RetainCountChecker::processNonLeakError(ProgramStateRef St, RefVal::Kind ErrorKind, SymbolRef Sym, CheckerContext &C) const { + // HACK: Ignore retain-count issues on values accessed through ivars, + // because of cases like this: + // [_contentView retain]; + // [_contentView removeFromSuperview]; + // [self addSubview:_contentView]; // invalidates 'self' + // [_contentView release]; + if (const RefVal *RV = getRefBinding(St, Sym)) + if (RV->getIvarAccessHistory() != RefVal::IvarAccessHistory::None) + return; + ExplodedNode *N = C.generateSink(St); if (!N) return; @@ -3229,7 +3312,7 @@ bool RetainCountChecker::evalCall(const CallExpr *CE, CheckerContext &C) const { // See if it's one of the specific functions we know how to eval. bool canEval = false; - QualType ResultTy = CE->getCallReturnType(); + QualType ResultTy = CE->getCallReturnType(C.getASTContext()); if (ResultTy->isObjCIdType()) { // Handle: id NSMakeCollectable(CFTypeRef) canEval = II->isStr("NSMakeCollectable"); @@ -3388,6 +3471,15 @@ void RetainCountChecker::checkReturnWithRetEffect(const ReturnStmt *S, RetEffect RE, RefVal X, SymbolRef Sym, ProgramStateRef state) const { + // HACK: Ignore retain-count issues on values accessed through ivars, + // because of cases like this: + // [_contentView retain]; + // [_contentView removeFromSuperview]; + // [self addSubview:_contentView]; // invalidates 'self' + // [_contentView release]; + if (X.getIvarAccessHistory() != RefVal::IvarAccessHistory::None) + return; + // Any leaks or other errors? if (X.isReturnedOwned() && X.getCount() == 0) { if (RE.getKind() != RetEffect::NoRet) { @@ -3428,22 +3520,31 @@ void RetainCountChecker::checkReturnWithRetEffect(const ReturnStmt *S, } } else if (X.isReturnedNotOwned()) { if (RE.isOwned()) { - // Trying to return a not owned object to a caller expecting an - // owned object. - state = setRefBinding(state, Sym, X ^ RefVal::ErrorReturnedNotOwned); - - static CheckerProgramPointTag ReturnNotOwnedTag(this, - "ReturnNotOwnedForOwned"); - ExplodedNode *N = C.addTransition(state, Pred, &ReturnNotOwnedTag); - if (N) { - if (!returnNotOwnedForOwned) - returnNotOwnedForOwned.reset(new ReturnedNotOwnedForOwned(this)); - - CFRefReport *report = - new CFRefReport(*returnNotOwnedForOwned, - C.getASTContext().getLangOpts(), - C.isObjCGCEnabled(), SummaryLog, N, Sym); - C.emitReport(report); + if (X.getIvarAccessHistory() == + RefVal::IvarAccessHistory::AccessedDirectly) { + // Assume the method was trying to transfer a +1 reference from a + // strong ivar to the caller. + state = setRefBinding(state, Sym, + X.releaseViaIvar() ^ RefVal::ReturnedOwned); + } else { + // Trying to return a not owned object to a caller expecting an + // owned object. 
+ state = setRefBinding(state, Sym, X ^ RefVal::ErrorReturnedNotOwned); + + static CheckerProgramPointTag + ReturnNotOwnedTag(this, "ReturnNotOwnedForOwned"); + + ExplodedNode *N = C.addTransition(state, Pred, &ReturnNotOwnedTag); + if (N) { + if (!returnNotOwnedForOwned) + returnNotOwnedForOwned.reset(new ReturnedNotOwnedForOwned(this)); + + CFRefReport *report = + new CFRefReport(*returnNotOwnedForOwned, + C.getASTContext().getLangOpts(), + C.isObjCGCEnabled(), SummaryLog, N, Sym); + C.emitReport(report); + } } } } @@ -3594,6 +3695,14 @@ RetainCountChecker::handleAutoreleaseCounts(ProgramStateRef state, if (V.getKind() == RefVal::ReturnedOwned) ++Cnt; + // If we would over-release here, but we know the value came from an ivar, + // assume it was a strong ivar that's just been relinquished. + if (ACnt > Cnt && + V.getIvarAccessHistory() == RefVal::IvarAccessHistory::AccessedDirectly) { + V = V.releaseViaIvar(); + --ACnt; + } + if (ACnt <= Cnt) { if (ACnt == Cnt) { V.clearCounts(); @@ -3608,6 +3717,15 @@ RetainCountChecker::handleAutoreleaseCounts(ProgramStateRef state, return setRefBinding(state, Sym, V); } + // HACK: Ignore retain-count issues on values accessed through ivars, + // because of cases like this: + // [_contentView retain]; + // [_contentView removeFromSuperview]; + // [self addSubview:_contentView]; // invalidates 'self' + // [_contentView release]; + if (V.getIvarAccessHistory() != RefVal::IvarAccessHistory::None) + return state; + // Woah! More autorelease counts then retain counts left. // Emit hard error. V = V ^ RefVal::ErrorOverAutorelease; @@ -3641,11 +3759,22 @@ ProgramStateRef RetainCountChecker::handleSymbolDeath(ProgramStateRef state, SymbolRef sid, RefVal V, SmallVectorImpl<SymbolRef> &Leaked) const { - bool hasLeak = false; - if (V.isOwned()) + bool hasLeak; + + // HACK: Ignore retain-count issues on values accessed through ivars, + // because of cases like this: + // [_contentView retain]; + // [_contentView removeFromSuperview]; + // [self addSubview:_contentView]; // invalidates 'self' + // [_contentView release]; + if (V.getIvarAccessHistory() != RefVal::IvarAccessHistory::None) + hasLeak = false; + else if (V.isOwned()) hasLeak = true; else if (V.isNotOwned() || V.isReturnedOwned()) hasLeak = (V.getCount() > 0); + else + hasLeak = false; if (!hasLeak) return removeRefBinding(state, sid); diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp index d717e3f..1696bcf 100644 --- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp +++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp @@ -13,12 +13,14 @@ //===----------------------------------------------------------------------===// #include "clang/StaticAnalyzer/Core/AnalyzerOptions.h" +#include "clang/StaticAnalyzer/Core/Checker.h" #include "llvm/ADT/SmallString.h" #include "llvm/ADT/StringSwitch.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/raw_ostream.h" using namespace clang; +using namespace ento; using namespace llvm; AnalyzerOptions::UserModeKind AnalyzerOptions::getUserMode() { @@ -100,12 +102,37 @@ AnalyzerOptions::mayInlineCXXMemberFunction(CXXInlineableMemberKind K) { static StringRef toString(bool b) { return b ? 
"true" : "false"; } -bool AnalyzerOptions::getBooleanOption(StringRef Name, bool DefaultVal) { +StringRef AnalyzerOptions::getCheckerOption(StringRef CheckerName, + StringRef OptionName, + StringRef Default, + bool SearchInParents) { + // Search for a package option if the option for the checker is not specified + // and search in parents is enabled. + ConfigTable::const_iterator E = Config.end(); + do { + ConfigTable::const_iterator I = + Config.find((Twine(CheckerName) + ":" + OptionName).str()); + if (I != E) + return StringRef(I->getValue()); + size_t Pos = CheckerName.rfind('.'); + if (Pos == StringRef::npos) + return Default; + CheckerName = CheckerName.substr(0, Pos); + } while (!CheckerName.empty() && SearchInParents); + return Default; +} + +bool AnalyzerOptions::getBooleanOption(StringRef Name, bool DefaultVal, + const CheckerBase *C, + bool SearchInParents) { // FIXME: We should emit a warning here if the value is something other than // "true", "false", or the empty string (meaning the default value), // but the AnalyzerOptions doesn't have access to a diagnostic engine. + StringRef Default = toString(DefaultVal); StringRef V = - Config.insert(std::make_pair(Name, toString(DefaultVal))).first->second; + C ? getCheckerOption(C->getTagDescription(), Name, Default, + SearchInParents) + : StringRef(Config.insert(std::make_pair(Name, Default)).first->second); return llvm::StringSwitch<bool>(V) .Case("true", true) .Case("false", false) @@ -113,9 +140,10 @@ bool AnalyzerOptions::getBooleanOption(StringRef Name, bool DefaultVal) { } bool AnalyzerOptions::getBooleanOption(Optional<bool> &V, StringRef Name, - bool DefaultVal) { + bool DefaultVal, const CheckerBase *C, + bool SearchInParents) { if (!V.hasValue()) - V = getBooleanOption(Name, DefaultVal); + V = getBooleanOption(Name, DefaultVal, C, SearchInParents); return V.getValue(); } @@ -199,19 +227,35 @@ bool AnalyzerOptions::shouldWriteStableReportFilename() { /* Default = */ false); } -int AnalyzerOptions::getOptionAsInteger(StringRef Name, int DefaultVal) { +int AnalyzerOptions::getOptionAsInteger(StringRef Name, int DefaultVal, + const CheckerBase *C, + bool SearchInParents) { SmallString<10> StrBuf; llvm::raw_svector_ostream OS(StrBuf); OS << DefaultVal; - StringRef V = Config.insert(std::make_pair(Name, OS.str())).first->second; + StringRef V = C ? getCheckerOption(C->getTagDescription(), Name, OS.str(), + SearchInParents) + : StringRef(Config.insert(std::make_pair(Name, OS.str())) + .first->second); + int Res = DefaultVal; bool b = V.getAsInteger(10, Res); assert(!b && "analyzer-config option should be numeric"); - (void) b; + (void)b; return Res; } +StringRef AnalyzerOptions::getOptionAsString(StringRef Name, + StringRef DefaultVal, + const CheckerBase *C, + bool SearchInParents) { + return C ? 
getCheckerOption(C->getTagDescription(), Name, DefaultVal, + SearchInParents) + : StringRef( + Config.insert(std::make_pair(Name, DefaultVal)).first->second); +} + unsigned AnalyzerOptions::getAlwaysInlineSize() { if (!AlwaysInlineSize.hasValue()) AlwaysInlineSize = getOptionAsInteger("ipa-always-inline-size", 3); @@ -281,4 +325,3 @@ bool AnalyzerOptions::shouldPrunePaths() { bool AnalyzerOptions::shouldConditionalizeStaticInitializers() { return getBooleanOption("cfg-conditional-static-initializers", true); } - diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BugReporter.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BugReporter.cpp index dff81e3..97e97ef 100644 --- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BugReporter.cpp +++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BugReporter.cpp @@ -2702,22 +2702,22 @@ const Stmt *BugReport::getStmt() const { return S; } -std::pair<BugReport::ranges_iterator, BugReport::ranges_iterator> -BugReport::getRanges() { - // If no custom ranges, add the range of the statement corresponding to - // the error node. - if (Ranges.empty()) { - if (const Expr *E = dyn_cast_or_null<Expr>(getStmt())) - addRange(E->getSourceRange()); - else - return std::make_pair(ranges_iterator(), ranges_iterator()); - } +llvm::iterator_range<BugReport::ranges_iterator> BugReport::getRanges() { + // If no custom ranges, add the range of the statement corresponding to + // the error node. + if (Ranges.empty()) { + if (const Expr *E = dyn_cast_or_null<Expr>(getStmt())) + addRange(E->getSourceRange()); + else + return llvm::make_range(ranges_iterator(), ranges_iterator()); + } - // User-specified absence of range info. - if (Ranges.size() == 1 && !Ranges.begin()->isValid()) - return std::make_pair(ranges_iterator(), ranges_iterator()); + // User-specified absence of range info. + if (Ranges.size() == 1 && !Ranges.begin()->isValid()) + return llvm::make_range(ranges_iterator(), ranges_iterator()); - return std::make_pair(Ranges.begin(), Ranges.end()); + return llvm::iterator_range<BugReport::ranges_iterator>(Ranges.begin(), + Ranges.end()); } PathDiagnosticLocation BugReport::getLocation(const SourceManager &SM) const { @@ -2763,9 +2763,7 @@ void BugReporter::FlushReports() { // warnings and new BugTypes. // FIXME: Only NSErrorChecker needs BugType's FlushReports. // Turn NSErrorChecker into a proper checker and remove this. - SmallVector<const BugType*, 16> bugTypes; - for (BugTypesTy::iterator I=BugTypes.begin(), E=BugTypes.end(); I!=E; ++I) - bugTypes.push_back(*I); + SmallVector<const BugType *, 16> bugTypes(BugTypes.begin(), BugTypes.end()); for (SmallVectorImpl<const BugType *>::iterator I = bugTypes.begin(), E = bugTypes.end(); I != E; ++I) const_cast<BugType*>(*I)->FlushReports(*this); @@ -3055,8 +3053,7 @@ static void CompactPathDiagnostic(PathPieces &path, const SourceManager& SM) { // Now take the pieces and construct a new PathDiagnostic. 
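The BugReport::getRanges change above returns llvm::iterator_range instead of std::pair<iterator, iterator> so callers can iterate with a range-based for loop. A dependency-free sketch of why the wrapper helps; llvm::make_range is the real equivalent:

#include <vector>

template <typename Iter> struct SimpleRange {
  Iter First, Last;
  Iter begin() const { return First; }
  Iter end() const { return Last; }
};

template <typename Iter> SimpleRange<Iter> makeRange(Iter F, Iter L) {
  return {F, L};
}

int sum(const std::vector<int> &V) {
  int S = 0;
  for (int X : makeRange(V.begin(), V.end())) // not possible with std::pair
    S += X;
  return S;
}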
path.clear(); - for (PiecesTy::iterator I=Pieces.begin(), E=Pieces.end(); I!=E; ++I) - path.push_back(*I); + path.insert(path.end(), Pieces.begin(), Pieces.end()); } bool GRBugReporter::generatePathDiagnostic(PathDiagnostic& PD, @@ -3434,10 +3431,8 @@ void BugReporter::FlushReport(BugReport *exampleReport, PathDiagnosticLocation L = exampleReport->getLocation(getSourceManager()); auto piece = llvm::make_unique<PathDiagnosticEventPiece>( L, exampleReport->getDescription()); - BugReport::ranges_iterator Beg, End; - std::tie(Beg, End) = exampleReport->getRanges(); - for ( ; Beg != End; ++Beg) - piece->addRange(*Beg); + for (const SourceRange &Range : exampleReport->getRanges()) + piece->addRange(Range); D->setEndOfPath(std::move(piece)); } diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp index 2d56bd0..b906cc9 100644 --- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp +++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp @@ -111,15 +111,14 @@ std::unique_ptr<PathDiagnosticPiece> BugReporterVisitor::getDefaultEndPath( PathDiagnosticLocation L = PathDiagnosticLocation::createEndOfPath(EndPathNode,BRC.getSourceManager()); - BugReport::ranges_iterator Beg, End; - std::tie(Beg, End) = BR.getRanges(); + const auto &Ranges = BR.getRanges(); // Only add the statement itself as a range if we didn't specify any // special ranges for this report. - auto P = llvm::make_unique<PathDiagnosticEventPiece>(L, BR.getDescription(), - Beg == End); - for (; Beg != End; ++Beg) - P->addRange(*Beg); + auto P = llvm::make_unique<PathDiagnosticEventPiece>( + L, BR.getDescription(), Ranges.begin() == Ranges.end()); + for (const SourceRange &Range : Ranges) + P->addRange(Range); return std::move(P); } diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Checker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Checker.cpp index 1a3965a..2235211 100644 --- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Checker.cpp +++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Checker.cpp @@ -36,11 +36,3 @@ raw_ostream& clang::ento::operator<<(raw_ostream &Out, Out << Checker.getCheckName().getName(); return Out; } - -void Checker<check::_VoidCheck, check::_VoidCheck, check::_VoidCheck, - check::_VoidCheck, check::_VoidCheck, check::_VoidCheck, - check::_VoidCheck, check::_VoidCheck, check::_VoidCheck, - check::_VoidCheck, check::_VoidCheck, check::_VoidCheck, - check::_VoidCheck, check::_VoidCheck, check::_VoidCheck, - check::_VoidCheck, check::_VoidCheck, check::_VoidCheck - >::anchor() { } diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp index 4699df8..8b7f18f 100644 --- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp +++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp @@ -1901,6 +1901,9 @@ void ExprEngine::VisitLvalArraySubscriptExpr(const ArraySubscriptExpr *A, getCheckerManager().runCheckersForPreStmt(checkerPreStmt, Pred, A, *this); StmtNodeBuilder Bldr(checkerPreStmt, Dst, *currBldrCtx); + assert(A->isGLValue() || + (!AMgr.getLangOpts().CPlusPlus && + A->getType().isCForbiddenLValueType())); for (ExplodedNodeSet::iterator it = checkerPreStmt.begin(), ei = checkerPreStmt.end(); it != ei; ++it) { @@ -1909,7 +1912,6 @@ void ExprEngine::VisitLvalArraySubscriptExpr(const ArraySubscriptExpr *A, SVal V = 
state->getLValue(A->getType(), state->getSVal(Idx, LCtx), state->getSVal(Base, LCtx)); - assert(A->isGLValue()); Bldr.generateNode(A, *it, state->BindExpr(A, LCtx, V), nullptr, ProgramPoint::PostLValueKind); } @@ -2646,17 +2648,6 @@ struct DOTGraphTraits<ExplodedNode*> : } // end llvm namespace #endif -#ifndef NDEBUG -template <typename ITERATOR> -ExplodedNode *GetGraphNode(ITERATOR I) { return *I; } - -template <> ExplodedNode* -GetGraphNode<llvm::DenseMap<ExplodedNode*, Expr*>::iterator> - (llvm::DenseMap<ExplodedNode*, Expr*>::iterator I) { - return I->first; -} -#endif - void ExprEngine::ViewGraph(bool trim) { #ifndef NDEBUG if (trim) { diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp index ffda527..1777ea9 100644 --- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp +++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp @@ -31,7 +31,7 @@ void ExprEngine::VisitBinaryOperator(const BinaryOperator* B, ExplodedNodeSet Tmp2; getCheckerManager().runCheckersForPreStmt(CheckedSet, Pred, B, *this); - // With both the LHS and RHS evaluated, process the operation itself. + // With both the LHS and RHS evaluated, process the operation itself. for (ExplodedNodeSet::iterator it=CheckedSet.begin(), ei=CheckedSet.end(); it != ei; ++it) { diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp index 88b5464..3c1a3b4 100644 --- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp +++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp @@ -45,7 +45,7 @@ class HTMLDiagnostics : public PathDiagnosticConsumer { public: HTMLDiagnostics(AnalyzerOptions &AnalyzerOpts, const std::string& prefix, const Preprocessor &pp); - virtual ~HTMLDiagnostics() { FlushDiagnostics(nullptr); } + ~HTMLDiagnostics() override { FlushDiagnostics(nullptr); } void FlushDiagnosticsImpl(std::vector<const PathDiagnostic *> &Diags, FilesMade *filesMade) override; @@ -282,7 +282,7 @@ void HTMLDiagnostics::ReportDiag(const PathDiagnostic& D, llvm::sys::path::append(Model, Directory, "report-%%%%%%.html"); if (std::error_code EC = - llvm::sys::fs::createUniqueFile(Model.str(), FD, ResultPath)) { + llvm::sys::fs::createUniqueFile(Model, FD, ResultPath)) { llvm::errs() << "warning: could not create file in '" << Directory << "': " << EC.message() << '\n'; return; @@ -302,12 +302,12 @@ void HTMLDiagnostics::ReportDiag(const PathDiagnostic& D, << "-" << i << ".html"; llvm::sys::path::append(Model, Directory, filename.str()); - EC = llvm::sys::fs::openFileForWrite(Model.str(), + EC = llvm::sys::fs::openFileForWrite(Model, FD, llvm::sys::fs::F_RW | llvm::sys::fs::F_Excl); if (EC && EC != std::errc::file_exists) { - llvm::errs() << "warning: could not create file '" << Model.str() + llvm::errs() << "warning: could not create file '" << Model << "': " << EC.message() << '\n'; return; } diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/MemRegion.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/MemRegion.cpp index 76cead6..1fa6754 100644 --- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/MemRegion.cpp +++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/MemRegion.cpp @@ -1372,10 +1372,11 @@ void BlockDataRegion::LazyInitializeReferencedVars() { return; AnalysisDeclContext *AC = getCodeRegion()->getAnalysisDeclContext(); - 
AnalysisDeclContext::referenced_decls_iterator I, E; - std::tie(I, E) = AC->getReferencedBlockVars(BC->getDecl()); + const auto &ReferencedBlockVars = AC->getReferencedBlockVars(BC->getDecl()); + auto NumBlockVars = + std::distance(ReferencedBlockVars.begin(), ReferencedBlockVars.end()); - if (I == E) { + if (NumBlockVars == 0) { ReferencedVars = (void*) 0x1; return; } @@ -1386,14 +1387,14 @@ void BlockDataRegion::LazyInitializeReferencedVars() { typedef BumpVector<const MemRegion*> VarVec; VarVec *BV = (VarVec*) A.Allocate<VarVec>(); - new (BV) VarVec(BC, E - I); + new (BV) VarVec(BC, NumBlockVars); VarVec *BVOriginal = (VarVec*) A.Allocate<VarVec>(); - new (BVOriginal) VarVec(BC, E - I); + new (BVOriginal) VarVec(BC, NumBlockVars); - for ( ; I != E; ++I) { + for (const VarDecl *VD : ReferencedBlockVars) { const VarRegion *VR = nullptr; const VarRegion *OriginalVR = nullptr; - std::tie(VR, OriginalVR) = getCaptureRegions(*I); + std::tie(VR, OriginalVR) = getCaptureRegions(VD); assert(VR); assert(OriginalVR); BV->push_back(VR, BC); diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/PathDiagnostic.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/PathDiagnostic.cpp index b971fff..c490031 100644 --- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/PathDiagnostic.cpp +++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/PathDiagnostic.cpp @@ -432,11 +432,15 @@ void PathDiagnosticConsumer::FlushDiagnostics( // Sort the diagnostics so that they are always emitted in a deterministic // order. - if (!BatchDiags.empty()) - std::sort(BatchDiags.begin(), BatchDiags.end(), - [](const PathDiagnostic *X, const PathDiagnostic *Y) { - return X != Y && compare(*X, *Y); - }); + int (*Comp)(const PathDiagnostic *const *, const PathDiagnostic *const *) = + [](const PathDiagnostic *const *X, const PathDiagnostic *const *Y) { + assert(*X != *Y && "PathDiagnostics not uniqued!"); + if (compare(**X, **Y)) + return -1; + assert(compare(**Y, **X) && "Not a total order!"); + return 1; + }; + array_pod_sort(BatchDiags.begin(), BatchDiags.end(), Comp); FlushDiagnosticsImpl(BatchDiags, Files); @@ -452,7 +456,7 @@ void PathDiagnosticConsumer::FlushDiagnostics( } PathDiagnosticConsumer::FilesMade::~FilesMade() { - for (PDFileEntry &Entry : *this) + for (PDFileEntry &Entry : Set) Entry.~PDFileEntry(); } @@ -462,11 +466,11 @@ void PathDiagnosticConsumer::FilesMade::addDiagnostic(const PathDiagnostic &PD, llvm::FoldingSetNodeID NodeID; NodeID.Add(PD); void *InsertPos; - PDFileEntry *Entry = FindNodeOrInsertPos(NodeID, InsertPos); + PDFileEntry *Entry = Set.FindNodeOrInsertPos(NodeID, InsertPos); if (!Entry) { Entry = Alloc.Allocate<PDFileEntry>(); Entry = new (Entry) PDFileEntry(NodeID); - InsertNode(Entry, InsertPos); + Set.InsertNode(Entry, InsertPos); } // Allocate persistent storage for the file name. 
@@ -483,7 +487,7 @@ PathDiagnosticConsumer::FilesMade::getFiles(const PathDiagnostic &PD) { llvm::FoldingSetNodeID NodeID; NodeID.Add(PD); void *InsertPos; - PDFileEntry *Entry = FindNodeOrInsertPos(NodeID, InsertPos); + PDFileEntry *Entry = Set.FindNodeOrInsertPos(NodeID, InsertPos); if (!Entry) return nullptr; return &Entry->files; diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp index a2c66f8..e0aff58 100644 --- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp +++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp @@ -37,7 +37,7 @@ namespace { const LangOptions &LangOpts, bool supportsMultipleFiles); - virtual ~PlistDiagnostics() {} + ~PlistDiagnostics() override {} void FlushDiagnosticsImpl(std::vector<const PathDiagnostic *> &Diags, FilesMade *filesMade) override; @@ -106,13 +106,14 @@ static void ReportControlFlow(raw_ostream &o, // by forcing to use only the beginning of the range. This simplifies the layout // logic for clients. Indent(o, indent) << "<key>start</key>\n"; - SourceLocation StartEdge = I->getStart().asRange().getBegin(); - EmitRange(o, SM, LangOpts, CharSourceRange::getTokenRange(StartEdge), FM, + SourceRange StartEdge( + SM.getExpansionLoc(I->getStart().asRange().getBegin())); + EmitRange(o, SM, Lexer::getAsCharRange(StartEdge, SM, LangOpts), FM, indent + 1); Indent(o, indent) << "<key>end</key>\n"; - SourceLocation EndEdge = I->getEnd().asRange().getBegin(); - EmitRange(o, SM, LangOpts, CharSourceRange::getTokenRange(EndEdge), FM, + SourceRange EndEdge(SM.getExpansionLoc(I->getEnd().asRange().getBegin())); + EmitRange(o, SM, Lexer::getAsCharRange(EndEdge, SM, LangOpts), FM, indent + 1); --indent; @@ -154,7 +155,7 @@ static void ReportEvent(raw_ostream &o, const PathDiagnosticPiece& P, FullSourceLoc L = P.getLocation().asLocation(); Indent(o, indent) << "<key>location</key>\n"; - EmitLocation(o, SM, LangOpts, L, FM, indent); + EmitLocation(o, SM, L, FM, indent); // Output the ranges (if any). ArrayRef<SourceRange> Ranges = P.getRanges(); @@ -163,11 +164,10 @@ static void ReportEvent(raw_ostream &o, const PathDiagnosticPiece& P, Indent(o, indent) << "<key>ranges</key>\n"; Indent(o, indent) << "<array>\n"; ++indent; - for (ArrayRef<SourceRange>::iterator I = Ranges.begin(), E = Ranges.end(); - I != E; ++I) { - EmitRange(o, SM, LangOpts, CharSourceRange::getTokenRange(*I), FM, - indent + 1); - } + for (auto &R : Ranges) + EmitRange(o, SM, + Lexer::getAsCharRange(SM.getExpansionRange(R), SM, LangOpts), + FM, indent + 1); --indent; Indent(o, indent) << "</array>\n"; } @@ -387,7 +387,9 @@ void PlistDiagnostics::FlushDiagnosticsImpl( EmitString(o, D->getCategory()) << '\n'; o << " <key>type</key>"; EmitString(o, D->getBugType()) << '\n'; - + o << " <key>check_name</key>"; + EmitString(o, D->getCheckName()) << '\n'; + // Output information about the semantic context where // the issue occurred. if (const Decl *DeclWithIssue = D->getDeclWithIssue()) { @@ -453,7 +455,7 @@ void PlistDiagnostics::FlushDiagnosticsImpl( // Output the location of the bug. o << " <key>location</key>\n"; - EmitLocation(o, *SM, LangOpts, D->getLocation().asLocation(), FM, 2); + EmitLocation(o, *SM, D->getLocation().asLocation(), FM, 2); // Output the diagnostic to the sub-diagnostic client, if any. 
if (!filesMade->empty()) { diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/RegionStore.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/RegionStore.cpp index 4505622..6d41fc2 100644 --- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/RegionStore.cpp +++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/RegionStore.cpp @@ -767,7 +767,7 @@ static inline bool isUnionField(const FieldRegion *FR) { typedef SmallVector<const FieldDecl *, 8> FieldVector; -void getSymbolicOffsetFields(BindingKey K, FieldVector &Fields) { +static void getSymbolicOffsetFields(BindingKey K, FieldVector &Fields) { assert(K.hasSymbolicOffset() && "Not implemented for concrete offset keys"); const MemRegion *Base = K.getConcreteOffsetRegion(); diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.h b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.h index a72d1d4..135cd4e 100644 --- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.h +++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.h @@ -27,7 +27,7 @@ class SimpleConstraintManager : public ConstraintManager { public: SimpleConstraintManager(SubEngine *subengine, SValBuilder &SB) : SU(subengine), SVB(SB) {} - virtual ~SimpleConstraintManager(); + ~SimpleConstraintManager() override; //===------------------------------------------------------------------===// // Common implementation for the interface provided by ConstraintManager. diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp index df9e4d6..b3cab87c 100644 --- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp +++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp @@ -29,7 +29,7 @@ public: SimpleSValBuilder(llvm::BumpPtrAllocator &alloc, ASTContext &context, ProgramStateManager &stateMgr) : SValBuilder(alloc, context, stateMgr) {} - virtual ~SimpleSValBuilder() {} + ~SimpleSValBuilder() override {} SVal evalMinus(NonLoc val) override; SVal evalComplement(NonLoc val) override; diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp index 183ef35..fbeffb8 100644 --- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp +++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp @@ -92,7 +92,7 @@ class ClangDiagPathDiagConsumer : public PathDiagnosticConsumer { public: ClangDiagPathDiagConsumer(DiagnosticsEngine &Diag) : Diag(Diag), IncludePath(false) {} - virtual ~ClangDiagPathDiagConsumer() {} + ~ClangDiagPathDiagConsumer() override {} StringRef getName() const override { return "ClangDiags"; } bool supportsLogicalOpControlFlow() const override { return true; } @@ -199,7 +199,7 @@ public: } } - ~AnalysisConsumer() { + ~AnalysisConsumer() override { if (Opts->PrintStats) delete TUTotalTimer; } @@ -373,8 +373,7 @@ public: return true; } - virtual void - AddDiagnosticConsumer(PathDiagnosticConsumer *Consumer) override { + void AddDiagnosticConsumer(PathDiagnosticConsumer *Consumer) override { PathConsumers.push_back(Consumer); } @@ -590,7 +589,7 @@ AnalysisConsumer::getModeForDecl(Decl *D, AnalysisMode Mode) { // - System headers: don't run any checks. 
SourceManager &SM = Ctx->getSourceManager(); SourceLocation SL = SM.getExpansionLoc(D->getLocation()); - if (!Opts->AnalyzeAll && !SM.isInMainFile(SL)) { + if (!Opts->AnalyzeAll && !SM.isWrittenInMainFile(SL)) { if (SL.isInvalid() || SM.isInSystemHeader(SL)) return AM_None; return Mode & ~AM_Path; @@ -724,7 +723,7 @@ class UbigraphViz : public ExplodedNode::Auditor { public: UbigraphViz(std::unique_ptr<raw_ostream> Out, StringRef Filename); - ~UbigraphViz(); + ~UbigraphViz() override; void AddEdge(ExplodedNode *Src, ExplodedNode *Dst) override; }; @@ -735,7 +734,7 @@ static std::unique_ptr<ExplodedNode::Auditor> CreateUbiViz() { SmallString<128> P; int FD; llvm::sys::fs::createTemporaryFile("llvm_ubi", "", FD, P); - llvm::errs() << "Writing '" << P.str() << "'.\n"; + llvm::errs() << "Writing '" << P << "'.\n"; auto Stream = llvm::make_unique<llvm::raw_fd_ostream>(FD, true); diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp index 36565cb..b3ff797 100644 --- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp +++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp @@ -125,7 +125,7 @@ ento::createCheckerManager(AnalyzerOptions &opts, const LangOptions &langOpts, } - return std::move(checkerMgr); + return checkerMgr; } void ento::printCheckerHelp(raw_ostream &out, ArrayRef<std::string> plugins) { diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/ModelInjector.h b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/ModelInjector.h index fd24e32..e23bf8a 100644 --- a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/ModelInjector.h +++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/ModelInjector.h @@ -43,8 +43,8 @@ namespace ento { class ModelInjector : public CodeInjector { public: ModelInjector(CompilerInstance &CI); - Stmt *getBody(const FunctionDecl *D); - Stmt *getBody(const ObjCMethodDecl *D); + Stmt *getBody(const FunctionDecl *D) override; + Stmt *getBody(const ObjCMethodDecl *D) override; private: /// \brief Synthesize a body for a declaration diff --git a/contrib/llvm/tools/clang/lib/Tooling/CommonOptionsParser.cpp b/contrib/llvm/tools/clang/lib/Tooling/CommonOptionsParser.cpp index 91c74a4..adae178 100644 --- a/contrib/llvm/tools/clang/lib/Tooling/CommonOptionsParser.cpp +++ b/contrib/llvm/tools/clang/lib/Tooling/CommonOptionsParser.cpp @@ -54,6 +54,7 @@ const char *const CommonOptionsParser::HelpMessage = "\tsuffix of a path in the compile command database.\n" "\n"; +namespace { class ArgumentsAdjustingCompilations : public CompilationDatabase { public: ArgumentsAdjustingCompilations( @@ -89,6 +90,7 @@ private: return Commands; } }; +} // namespace CommonOptionsParser::CommonOptionsParser(int &argc, const char **argv, cl::OptionCategory &Category, @@ -112,15 +114,7 @@ CommonOptionsParser::CommonOptionsParser(int &argc, const char **argv, cl::desc("Additional argument to prepend to the compiler command line"), cl::cat(Category)); - // Hide unrelated options. 
- StringMap<cl::Option*> Options; - cl::getRegisteredOptions(Options); - for (StringMap<cl::Option *>::iterator I = Options.begin(), E = Options.end(); - I != E; ++I) { - if (I->second->Category != &Category && I->first() != "help" && - I->first() != "version") - I->second->setHiddenFlag(cl::ReallyHidden); - } + cl::HideUnrelatedOptions(Category); Compilations.reset(FixedCompilationDatabase::loadFromCommandLine(argc, argv)); diff --git a/contrib/llvm/tools/clang/lib/Tooling/CompilationDatabase.cpp b/contrib/llvm/tools/clang/lib/Tooling/CompilationDatabase.cpp index 7613988..2514f02 100644 --- a/contrib/llvm/tools/clang/lib/Tooling/CompilationDatabase.cpp +++ b/contrib/llvm/tools/clang/lib/Tooling/CompilationDatabase.cpp @@ -29,9 +29,8 @@ #include "llvm/Support/Path.h" #include <sstream> #include <system_error> - -namespace clang { -namespace tooling { +using namespace clang; +using namespace tooling; CompilationDatabase::~CompilationDatabase() {} @@ -109,6 +108,7 @@ CompilationDatabase::autoDetectFromDirectory(StringRef SourceDir, CompilationDatabasePlugin::~CompilationDatabasePlugin() {} +namespace { // Helper for recursively searching through a chain of actions and collecting // all inputs, direct and indirect, of compile jobs. struct CompileJobAnalyzer { @@ -156,8 +156,8 @@ public: // recording for our own purposes. UnusedInputDiagConsumer(DiagnosticConsumer *Other) : Other(Other) {} - virtual void HandleDiagnostic(DiagnosticsEngine::Level DiagLevel, - const Diagnostic &Info) override { + void HandleDiagnostic(DiagnosticsEngine::Level DiagLevel, + const Diagnostic &Info) override { if (Info.getID() == clang::diag::warn_drv_input_file_unused) { // Arg 1 for this diagnostic is the option that didn't get used. UnusedInputs.push_back(Info.getArgStdStr(0)); @@ -183,6 +183,7 @@ struct MatchesAny { private: ArrayRef<std::string> Arr; }; +} // namespace /// \brief Strips any positional args and possible argv[0] from a command-line /// provided by the user to construct a FixedCompilationDatabase. @@ -282,11 +283,9 @@ static bool stripPositionalArgs(std::vector<const char *> Args, return true; } -FixedCompilationDatabase * -FixedCompilationDatabase::loadFromCommandLine(int &Argc, - const char **Argv, - Twine Directory) { - const char **DoubleDash = std::find(Argv, Argv + Argc, StringRef("--")); +FixedCompilationDatabase *FixedCompilationDatabase::loadFromCommandLine( + int &Argc, const char *const *Argv, Twine Directory) { + const char *const *DoubleDash = std::find(Argv, Argv + Argc, StringRef("--")); if (DoubleDash == Argv + Argc) return nullptr; std::vector<const char *> CommandLine(DoubleDash + 1, Argv + Argc); @@ -324,6 +323,9 @@ FixedCompilationDatabase::getAllCompileCommands() const { return std::vector<CompileCommand>(); } +namespace clang { +namespace tooling { + // This anchor is used to force the linker to link in the generated object file // and thus register the JSONCompilationDatabasePlugin. 
extern volatile int JSONAnchorSource; diff --git a/contrib/llvm/tools/clang/lib/Tooling/Core/Replacement.cpp b/contrib/llvm/tools/clang/lib/Tooling/Core/Replacement.cpp index 525f7df..b9fc92b 100644 --- a/contrib/llvm/tools/clang/lib/Tooling/Core/Replacement.cpp +++ b/contrib/llvm/tools/clang/lib/Tooling/Core/Replacement.cpp @@ -77,11 +77,11 @@ bool Replacement::apply(Rewriter &Rewrite) const { } std::string Replacement::toString() const { - std::string result; - llvm::raw_string_ostream stream(result); - stream << FilePath << ": " << ReplacementRange.getOffset() << ":+" + std::string Result; + llvm::raw_string_ostream Stream(Result); + Stream << FilePath << ": " << ReplacementRange.getOffset() << ":+" << ReplacementRange.getLength() << ":\"" << ReplacementText << "\""; - return result; + return Stream.str(); } bool operator<(const Replacement &LHS, const Replacement &RHS) { diff --git a/contrib/llvm/tools/clang/lib/Tooling/FileMatchTrie.cpp b/contrib/llvm/tools/clang/lib/Tooling/FileMatchTrie.cpp index dc9999e..86ed036 100644 --- a/contrib/llvm/tools/clang/lib/Tooling/FileMatchTrie.cpp +++ b/contrib/llvm/tools/clang/lib/Tooling/FileMatchTrie.cpp @@ -17,18 +17,20 @@ #include "llvm/Support/Path.h" #include "llvm/Support/raw_ostream.h" #include <sstream> +using namespace clang; +using namespace tooling; -namespace clang { -namespace tooling { - +namespace { /// \brief Default \c PathComparator using \c llvm::sys::fs::equivalent(). struct DefaultPathComparator : public PathComparator { - virtual ~DefaultPathComparator() {} bool equivalent(StringRef FileA, StringRef FileB) const override { return FileA == FileB || llvm::sys::fs::equivalent(FileA, FileB); } }; +} +namespace clang { +namespace tooling { /// \brief A node of the \c FileMatchTrie. /// /// Each node has storage for up to one path and a map mapping a path segment to @@ -156,6 +158,8 @@ private: // The children of this node stored in a map based on the next path segment. 
llvm::StringMap<FileMatchTrieNode> Children; }; +} // end namespace tooling +} // end namespace clang FileMatchTrie::FileMatchTrie() : Root(new FileMatchTrieNode), Comparator(new DefaultPathComparator()) {} @@ -183,6 +187,3 @@ StringRef FileMatchTrie::findEquivalent(StringRef FileName, Error << "Path is ambiguous"; return Result; } - -} // end namespace tooling -} // end namespace clang diff --git a/contrib/llvm/tools/clang/lib/Tooling/JSONCompilationDatabase.cpp b/contrib/llvm/tools/clang/lib/Tooling/JSONCompilationDatabase.cpp index 3b5f7e2..7dc211e 100644 --- a/contrib/llvm/tools/clang/lib/Tooling/JSONCompilationDatabase.cpp +++ b/contrib/llvm/tools/clang/lib/Tooling/JSONCompilationDatabase.cpp @@ -176,7 +176,7 @@ JSONCompilationDatabase::getCompileCommands(StringRef FilePath) const { std::string Error; llvm::raw_string_ostream ES(Error); - StringRef Match = MatchTrie.findEquivalent(NativeFilePath.str(), ES); + StringRef Match = MatchTrie.findEquivalent(NativeFilePath, ES); if (Match.empty()) return std::vector<CompileCommand>(); llvm::StringMap< std::vector<CompileCommandRef> >::const_iterator @@ -307,13 +307,13 @@ bool JSONCompilationDatabase::parse(std::string &ErrorMessage) { SmallString<128> AbsolutePath( Directory->getValue(DirectoryStorage)); llvm::sys::path::append(AbsolutePath, FileName); - llvm::sys::path::native(AbsolutePath.str(), NativeFilePath); + llvm::sys::path::native(AbsolutePath, NativeFilePath); } else { llvm::sys::path::native(FileName, NativeFilePath); } IndexByFile[NativeFilePath].push_back( CompileCommandRef(Directory, Command)); - MatchTrie.insert(NativeFilePath.str()); + MatchTrie.insert(NativeFilePath); } return true; } diff --git a/contrib/llvm/tools/clang/lib/Tooling/Tooling.cpp b/contrib/llvm/tools/clang/lib/Tooling/Tooling.cpp index 60371fb..e100003 100644 --- a/contrib/llvm/tools/clang/lib/Tooling/Tooling.cpp +++ b/contrib/llvm/tools/clang/lib/Tooling/Tooling.cpp @@ -90,7 +90,7 @@ static const llvm::opt::ArgStringList *getCC1Arguments( } /// \brief Returns a clang build invocation initialized from the CC1 flags. -static clang::CompilerInvocation *newInvocation( +clang::CompilerInvocation *newInvocation( clang::DiagnosticsEngine *Diagnostics, const llvm::opt::ArgStringList &CC1Args) { assert(!CC1Args.empty() && "Must at least contain the program name!"); |